/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
#include "internal-fn.h"
#include "langhooks.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

static rtx emit_conditional_move_1 (rtx, rtx, rtx, rtx, machine_mode);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUIV note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
          {
            note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
            if (GET_MODE_UNIT_SIZE (op0_mode)
                > GET_MODE_UNIT_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, op0_mode);
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, op0_mode);
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
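/* Illustrative example (not part of the original sources): if x * 9 was
   expanded as t = x << 3; t = t + x, then calling
   add_equal_note (insns, t, MULT, x, GEN_INT (9), VOIDmode) attaches
   REG_EQUAL (mult:M x (const_int 9)) to the last insn, letting passes
   such as CSE treat the two-insn sequence as one known value.  */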
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
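/* Example (illustrative): for a widening operation to DImode where op0 is
   (const_int 5) -- whose GET_MODE reads as VOIDmode -- and op1 is an
   SImode register, widened_mode returns SImode, the only useful
   from_mode here.  */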
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
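/* Example (illustrative): widening a QImode value to SImode for a bitwise
   AND may pass NO_EXTEND = 1, in which case this returns a (possibly
   paradoxical) lowpart SUBREG with undefined high bits instead of
   emitting an extension; a right shift must pass NO_EXTEND = 0 because
   its high-order bits flow into the low part of the result.  */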
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  oprnd1 = nops >= 2 ? ops->op1 : NULL_TREE;
  oprnd2 = nops >= 3 ? ops->op2 : NULL_TREE;

  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
            || ops->code == VEC_UNPACK_LO_EXPR)
           && VECTOR_BOOLEAN_TYPE_P (ops->type)
           && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
           && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
           && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
         the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
         vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
         the pattern number of elements in the wider vector.  */
      widen_pattern_optab
        = (ops->code == VEC_UNPACK_HI_EXPR
           ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else if (ops->code == DOT_PROD_EXPR)
    {
      enum optab_subtype subtype = optab_default;
      signop sign1 = TYPE_SIGN (TREE_TYPE (oprnd0));
      signop sign2 = TYPE_SIGN (TREE_TYPE (oprnd1));
      if (sign1 == sign2)
        ;
      else if (sign1 == SIGNED && sign2 == UNSIGNED)
        {
          subtype = optab_vector_mixed_sign;
          /* Same as optab_vector_mixed_sign but flip the operands.  */
          std::swap (op0, op1);
        }
      else if (sign1 == UNSIGNED && sign2 == SIGNED)
        subtype = optab_vector_mixed_sign;

      widen_pattern_optab
        = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), subtype);
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
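/* Usage sketch (illustrative, assuming the target provides fma_optab):

     rtx res = expand_ternary_op (SFmode, fma_optab, a, b, c,
                                  NULL_RTX, 0);

   expands the fused multiply-add a * b + c; as documented above, the
   returned rtx may or may not be the requested target.  */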
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
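/* Example (illustrative): expand_vector_broadcast (V4SImode, GEN_INT (7))
   yields the constant vector {7, 7, 7, 7} directly, while broadcasting a
   register value uses vec_duplicate when available and otherwise falls
   back to a vec_init with N identical elements.  */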
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab, outof_input,
                                 gen_int_shift_amount (word_mode,
                                                       BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
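/* Worked example (illustrative): for a 64-bit logical right shift by 40
   on a 32-bit target, the effective count is >= BITS_PER_WORD, so
   INTO_TARGET = OUTOF_INPUT >> 8 (SUPERWORD_OP1 == 40 - 32) and
   OUTOF_TARGET is simply zeroed; an arithmetic right shift instead fills
   OUTOF_TARGET with sign-bit copies via a shift by BITS_PER_WORD - 1.  */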
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
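/* Worked example (illustrative): for a 64-bit left shift by K < 32 on a
   32-bit target this computes
     INTO_TARGET  = (INTO_INPUT << K) | (OUTOF_INPUT >> (32 - K));
     OUTOF_TARGET = OUTOF_INPUT << K;
   where the carries term is built with the reverse (logical) shift, split
   into a shift by 1 and a shift by the remainder when shifting by exactly
   BITS_PER_WORD - K cannot be expressed safely.  */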
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, { cmp_code, cmp1, cmp2, op1_mode },
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target,
                                { cmp_code, cmp1, cmp2, op1_mode },
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           NULL_RTX, NULL, subword_label,
                           profile_probability::uninitialized ());

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                         _______________________
                        [__op0_high_|__op0_low__]
                         _______________________
        *               [__op1_high_|__op1_low__]
   _______________________________________________
                         _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
             _______________________
    (3)     [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
                : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
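/* Worked example (illustrative, 8-bit words for brevity): computing
   0x01ff * 0x0002 with only a signed widening multiply.  op0_low = 0xff
   reads as -1, so step (1) yields 0xfffe instead of 0x01fe.  The trick
   adds op0_low >> 7 == 1 to op0_high before computing (2b), making
   adjust = (0x01 + 1) * 0x02 = 4; op1_low's sign bit is clear, so (2a)
   needs no correction.  product_high becomes (0xff + 4) & 0xff = 0x03,
   giving the correct double-word result 0x03fe.  */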
/* Subroutine of expand_binop.  Optimize unsigned double-word OP0 % OP1 for
   constant OP1.  If for some bit in [BITS_PER_WORD / 2, BITS_PER_WORD] range
   (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
   computed in word-mode as
     ((OP0 & ((1w << bit) - 1)) + ((OP0 >> bit) & ((1w << bit) - 1))
      + (OP0 >> (2 * bit))) % OP1.
   Whether we need to sum 2, 3 or 4 values depends on the bit value; if 2,
   then carry from the addition needs to be added too, i.e. like:
     sum += __builtin_add_overflow (low, high, &sum)

   Optimize signed double-word OP0 % OP1 similarly, just apply some correction
   factor to the sum before doing unsigned remainder, in the form of
     sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
   then perform unsigned
     remainder = sum % OP1;
   and finally
     remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1);  */
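/* Worked example (illustrative): for BITS_PER_WORD == 32 and OP1 == 3,
   bit == 32 qualifies because (1w << 32) % 3 == 1.  Writing OP0 as
   high * 2**32 + low gives OP0 % 3 == (high + low + carry) % 3, so the
   double-word modulo reduces to one word-mode overflow-checked addition
   followed by a word-mode modulo by 3.  */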
static rtx
expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
{
  if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
    return NULL_RTX;

  rtx_insn *last = get_last_insn ();
  for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
    {
      wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
      if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
        continue;
      rtx sum = NULL_RTX, mask = NULL_RTX;
      if (bit == BITS_PER_WORD)
        {
          /* For signed modulo we need to add correction to the sum
             and that might again overflow.  */
          if (!unsignedp)
            continue;
          if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
            continue;
          tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
          if (wtype == NULL_TREE)
            continue;
          tree ctype = build_complex_type (wtype);
          if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
            continue;
          machine_mode cmode = TYPE_MODE (ctype);
          rtx op00 = operand_subword_force (op0, 0, mode);
          rtx op01 = operand_subword_force (op0, 1, mode);
          rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
                                     gen_reg_rtx (word_mode));
          tree lhs = make_tree (ctype, cres);
          tree arg0 = make_tree (wtype, op00);
          tree arg1 = make_tree (wtype, op01);
          expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
                                  arg1, true, true, true, false, NULL);
          sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
                                     XEXP (cres, 1), NULL_RTX, 1,
                                     OPTAB_DIRECT);
          if (sum == NULL_RTX)
            return NULL_RTX;
        }
      else
        {
          /* Code below uses GEN_INT, so we need the masks to be representable
             in HOST_WIDE_INTs.  */
          if (bit >= HOST_BITS_PER_WIDE_INT)
            continue;
          /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
             overflow.  Consider 64-bit -1ULL for word size 32, if we add
             0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1.  */
          if (bit == BITS_PER_WORD - 1)
            continue;

          int count = (2 * BITS_PER_WORD + bit - 1) / bit;
          rtx sum_corr = NULL_RTX;

          if (!unsignedp)
            {
              /* For signed modulo, compute it as unsigned modulo of
                 sum with a correction added to it if OP0 is negative,
                 such that the result can be computed as unsigned
                 remainder + ((OP1 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1).  */
              w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
              wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
              wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
              /* wmod2 == -wmod1.  */
              wmod2 = wmod2 + (INTVAL (op1) - 1);
              if (wi::ne_p (wmod1, wmod2))
                {
                  wide_int wcorr = wmod2 - wmod1;
                  if (wi::neg_p (wcorr))
                    wcorr = wcorr + INTVAL (op1);
                  /* Now verify if the count sums can't overflow, and punt
                     if they could.  */
                  w = wi::mask (bit, false, 2 * BITS_PER_WORD);
                  w = w * (count - 1);
                  w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
                                    false, 2 * BITS_PER_WORD);
                  w = w + wcorr;
                  w = wi::lrshift (w, BITS_PER_WORD);
                  if (wi::ne_p (w, 0))
                    continue;

                  mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
                                                mode);
                  mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
                                              GEN_INT (BITS_PER_WORD - 1),
                                              NULL_RTX, 0, OPTAB_DIRECT);
                  if (mask == NULL_RTX)
                    return NULL_RTX;
                  sum_corr = immed_wide_int_const (wcorr, word_mode);
                  sum_corr = expand_simple_binop (word_mode, AND, mask,
                                                  sum_corr, NULL_RTX, 1,
                                                  OPTAB_DIRECT);
                  if (sum_corr == NULL_RTX)
                    return NULL_RTX;
                }
            }

          for (int i = 0; i < count; i++)
            {
              rtx v = op0;
              if (i)
                v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
                                         NULL_RTX, 1, OPTAB_DIRECT);
              if (v == NULL_RTX)
                return NULL_RTX;
              v = lowpart_subreg (word_mode, v, mode);
              if (v == NULL_RTX)
                return NULL_RTX;
              if (i != count - 1)
                v = expand_simple_binop (word_mode, AND, v,
                                         GEN_INT ((HOST_WIDE_INT_1U << bit)
                                                  - 1), NULL_RTX, 1,
                                         OPTAB_DIRECT);
              if (v == NULL_RTX)
                return NULL_RTX;
              if (sum == NULL_RTX)
                sum = v;
              else
                sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
                                           1, OPTAB_DIRECT);
              if (sum == NULL_RTX)
                return NULL_RTX;
            }
          if (sum_corr)
            {
              sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
                                         NULL_RTX, 1, OPTAB_DIRECT);
              if (sum == NULL_RTX)
                return NULL_RTX;
            }
        }
      rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, NULL, NULL,
                                     sum, gen_int_mode (INTVAL (op1),
                                                        word_mode),
                                     NULL_RTX, 1, OPTAB_DIRECT);
      if (remainder == NULL_RTX)
        return NULL_RTX;

      if (!unsignedp)
        {
          if (mask == NULL_RTX)
            {
              mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
                                            mode);
              mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
                                          GEN_INT (BITS_PER_WORD - 1),
                                          NULL_RTX, 0, OPTAB_DIRECT);
              if (mask == NULL_RTX)
                return NULL_RTX;
            }
          mask = expand_simple_binop (word_mode, AND, mask,
                                      gen_int_mode (1 - INTVAL (op1),
                                                    word_mode),
                                      NULL_RTX, 1, OPTAB_DIRECT);
          if (mask == NULL_RTX)
            return NULL_RTX;
          remainder = expand_simple_binop (word_mode, PLUS, remainder,
                                           mask, NULL_RTX, 1, OPTAB_DIRECT);
          if (remainder == NULL_RTX)
            return NULL_RTX;
        }

      remainder = convert_modes (mode, word_mode, remainder, unsignedp);
      /* Punt if we need any library calls.  */
      if (last)
        last = NEXT_INSN (last);
      else
        last = get_insns ();
      for (; last; last = NEXT_INSN (last))
        if (CALL_P (last))
          return NULL_RTX;
      return remainder;
    }
  return NULL_RTX;
}
/* Similarly to the above function, but compute both quotient and remainder.
   Quotient can be computed from the remainder as:
     rem = op0 % op1;          // Handled using expand_doubleword_mod
     quot = (op0 - rem) * inv; // inv is multiplicative inverse of op1
                               // modulo 2**(2 * BITS_PER_WORD)

   We can also handle cases where op1 is a multiple of power of two constant
   and constant handled by expand_doubleword_mod.
     op11 = 1 << __builtin_ctz (op1);
     op12 = op1 / op11;
     rem1 = op0 % op12;          // Handled using expand_doubleword_mod
     quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse of op12
                                 // modulo 2**(2 * BITS_PER_WORD)
     rem = (quot1 % op11) * op12 + rem1;
     quot = quot1 / op11;  */
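/* Worked example (illustrative): for OP1 == 24 the split gives op11 == 8
   and op12 == 3, so rem1 = op0 % 3 comes from expand_doubleword_mod,
   quot1 = (op0 - rem1) * inv uses the multiplicative inverse of 3 modulo
   2**(2 * BITS_PER_WORD), and the final results are
   rem = (quot1 % 8) * 3 + rem1 and quot = quot1 / 8, where the % 8 and
   / 8 are cheap power-of-two operations.  */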
static rtx
expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
                          bool unsignedp)
{
  *rem = NULL_RTX;

  /* Negative dividend should have been optimized into positive,
     similarly modulo by 1 and modulo by power of two is optimized
     differently too.  */
  if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
    return NULL_RTX;

  rtx op11 = const1_rtx;
  rtx op12 = op1;
  if ((INTVAL (op1) & 1) == 0)
    {
      int bit = ctz_hwi (INTVAL (op1));
      op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
      op12 = GEN_INT (INTVAL (op1) >> bit);
    }

  rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
  if (rem1 == NULL_RTX)
    return NULL_RTX;

  int prec = 2 * BITS_PER_WORD;
  wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
  wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
  wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
  rtx inv = immed_wide_int_const (m, mode);

  rtx_insn *last = get_last_insn ();
  rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
                                   NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  quot1 = expand_simple_binop (mode, MULT, quot1, inv,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  if (op11 != const1_rtx)
    {
      rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, NULL, NULL, quot1,
                                op11, NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
                                  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
                                  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
        return NULL_RTX;

      rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, NULL, NULL, quot1,
                                 op11, NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (quot2 == NULL_RTX)
        return NULL_RTX;

      rem1 = rem2;
      quot1 = quot2;
    }

  /* Punt if we need any library calls.  */
  if (last)
    last = NEXT_INSN (last);
  else
    last = get_insns ();
  for (; last; last = NEXT_INSN (last))
    if (CALL_P (last))
      return NULL_RTX;

  *rem = rem1;
  return quot1;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */

static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
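/* Example (illustrative): on a target where loading a constant like
   0x12345678 costs several insns, expanding x + 0x12345678 through this
   helper materializes the constant in a register once instead of folding
   it into the add pattern's operand.  */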
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */

static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different than the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value, mode0))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */
  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
        {
          machine_mode from_mode = widened_mode (mode, op0, op1);
          icode = find_widening_optab_handler (binoptab, mode, from_mode);
        }
      else
        icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
        {
          temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
                                        target, unsignedp, methods, last);
          if (temp)
            return temp;
        }
    }
  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
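  /* Example (illustrative): an SImode rotl by 5 can be rewritten as a rotr
     by 27, since bits == 32 here; for a variable count the simple negation
     trick is only valid when the target truncates shift counts, as tested
     via targetm.shift_truncation_mask above.  */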
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
         one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
        op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab != unknown_optab
          && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
                  > GET_MODE_BITSIZE (inner_mode)))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        machine_mode next_mode;
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 next_mode, mode)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || reg_overlap_mentioned_p (target, op0)
          || reg_overlap_mentioned_p (target, op1)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
        op0_mode = int_mode;
      if (op1_mode == VOIDmode)
        op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, op0_mode),
                                operand_subword_force (op1, i, op1_mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
                  ? as_a <scalar_int_mode> (GET_MODE (op1))
                  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || reg_overlap_mentioned_p (target, op0)
              || reg_overlap_mentioned_p (target, op1)
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (int_mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, int_mode);
          into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

          outof_input = operand_subword_force (op0, outof_word, int_mode);
          into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }
1837 /* Synthesize double word rotates from single word shifts. */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || reg_overlap_mentioned_p (target, op0)
          || reg_overlap_mentioned_p (target, op1)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = target;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          HOST_WIDE_INT first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = shift_count - BITS_PER_WORD;
              second_shift_count = 2 * BITS_PER_WORD - shift_count;
            }
          else
            {
              first_shift_count = BITS_PER_WORD - shift_count;
              second_shift_count = shift_count;
            }
          rtx first_shift_count_rtx
            = gen_int_shift_amount (word_mode, first_shift_count);
          rtx second_shift_count_rtx
            = gen_int_shift_amount (word_mode, second_shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
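  /* The carry out of each word-sized addition X = A + B can be recovered
     from the result itself: X < A (unsigned) exactly when the addition
     wrapped, and for subtraction X > A exactly when it borrowed.  This is
     what the emit_store_flag_force calls below compute.  */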
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
          rtx op0_piece = operand_subword_force (xop0, index, int_mode);
          rtx op1_piece = operand_subword_force (xop1, index, int_mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                int_mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }
      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */
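  /* Writing each operand as hi * 2^W + lo, with W bits per word, the low
     2*W bits of the product are

	lo0 * lo1 + ((hi0 * lo1 + lo0 * hi1) << W)

     so one widening word multiply, two plain word multiplies and two word
     additions are enough; expand_doubleword_mult emits that sequence.  */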
  if (binoptab == smul_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (int_mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
              != CODE_FOR_nothing))
        {
          product = expand_doubleword_mult (int_mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
            {
              rtx_insn *move = emit_move_insn (target ? target : product,
                                               product);
              set_dst_reg_note (move,
                                REG_EQUAL,
                                gen_rtx_fmt_ee (MULT, int_mode,
                                                copy_rtx (op0),
                                                copy_rtx (op1)),
                                target ? target : product);
            }
          return product;
        }
    }
  /* Attempt to synthesize double word modulo by constant divisor.  */
  if ((binoptab == umod_optab
       || binoptab == smod_optab
       || binoptab == udiv_optab
       || binoptab == sdiv_optab)
      && optimize
      && CONST_INT_P (op1)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler ((binoptab == umod_optab || binoptab == udiv_optab)
                        ? udivmod_optab : sdivmod_optab,
                        int_mode) == CODE_FOR_nothing
      && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      rtx res = NULL_RTX;
      if ((binoptab == umod_optab || binoptab == smod_optab)
          && (INTVAL (op1) & 1) == 0)
        res = expand_doubleword_mod (int_mode, op0, op1,
                                     binoptab == umod_optab);
      else
        {
          rtx quot = expand_doubleword_divmod (int_mode, op0, op1, &res,
                                               binoptab == umod_optab
                                               || binoptab == udiv_optab);
          if (quot == NULL_RTX)
            res = NULL_RTX;
          else if (binoptab == udiv_optab || binoptab == sdiv_optab)
            res = quot;
        }
      if (res != NULL_RTX)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
            {
              rtx_insn *move = emit_move_insn (target ? target : res,
                                               res);
              set_dst_reg_note (move, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                int_mode, copy_rtx (op0), op1),
                                target ? target : res);
            }
          return res;
        }
      else
        delete_insns_since (last);
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
        {
          op1_mode = targetm.libgcc_shift_count_mode ();
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (op1_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
                                       NULL_RTX, LCT_CONST, mode,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
                            trapv ? NULL_RTX
                            : gen_rtx_fmt_ee (optab_to_code (binoptab),
                                              mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      /* This code doesn't make sense for conversion optabs, since we
         wouldn't then want to extend the operands to be the same size
         as the destination.  */
      gcc_assert (!convert_optab_p (binoptab));
      FOR_EACH_WIDER_MODE (wider_mode, mode)
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
              || (methods == OPTAB_LIB
                  && optab_libfunc (binoptab, wider_mode)))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && mclass == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (mclass != MODE_INT
                      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
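/* The widening claim holds because zero-extended operands are never
   negative: e.g. two QImode values zero-extended to HImode divide
   identically under signed and unsigned HImode division, so a signed
   wider insn can stand in for a missing unsigned one.  */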
rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
                       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
        return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
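/* For instance, a divmod<m>4 pattern has exactly this shape: one insn
   that sets TARG0 to the quotient and TARG1 to the remainder of OP0
   and OP1.  */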
int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
      create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
        return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
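/* E.g. a QImode clz expanded in SImode computes
   clz:SI (zero_extend:SI x) - 24; the zero extension contributes exactly
   24 extra leading zeros, which the subtraction removes again.  */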
static rtx
widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
{
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
        {
          rtx xop0, temp;
          rtx_insn *last;

          last = get_last_insn ();

          if (target == 0)
            target = gen_reg_rtx (mode);
          xop0 = widen_operand (op0, wider_mode, mode,
                                unoptab != clrsb_optab, false);
          temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                              unoptab != clrsb_optab);
          if (temp != 0)
            temp = expand_binop
              (wider_mode, sub_optab, temp,
               gen_int_mode (GET_MODE_PRECISION (wider_mode)
                             - GET_MODE_PRECISION (mode),
                             wider_mode),
               target, true, OPTAB_DIRECT);
          if (temp == 0)
            delete_insns_since (last);

          return temp;
        }
    }
  return 0;
}
/* Attempt to emit (clrsb:mode op0) as
   (plus:mode (clz:mode (xor:mode op0 (ashr:mode op0 (const_int prec-1))))
	      (const_int -1))
   if CLZ_DEFINED_VALUE_AT_ZERO (mode, val) is 2 and val is prec,
   or as
   (clz:mode (ior:mode (xor:mode (ashl:mode op0 (const_int 1))
				 (ashr:mode op0 (const_int prec-1)))
		       (const_int 1)))
   otherwise.  */
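/* Worked example in QImode: for op0 = 0b11100000, the arithmetic shift
   by prec-1 gives 0b11111111, the xor gives 0b00011111, clz of that is 3,
   and adding -1 yields clrsb = 2, the number of redundant sign bits.  */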
static rtx
expand_clrsb_using_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  if (optimize_insn_for_size_p ()
      || optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return NULL_RTX;

  start_sequence ();
  HOST_WIDE_INT val = 0;
  if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) != 2
      || val != GET_MODE_PRECISION (mode))
    val = 0;

  rtx temp2 = op0;
  if (!val)
    {
      temp2 = expand_binop (mode, ashl_optab, op0, const1_rtx,
                            NULL_RTX, 0, OPTAB_DIRECT);
      if (!temp2)
        {
        fail:
          end_sequence ();
          return NULL_RTX;
        }
    }

  rtx temp = expand_binop (mode, ashr_optab, op0,
                           GEN_INT (GET_MODE_PRECISION (mode) - 1),
                           NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  temp = expand_binop (mode, xor_optab, temp2, temp, NULL_RTX, 0,
                       OPTAB_DIRECT);
  if (!temp)
    goto fail;

  if (!val)
    {
      temp = expand_binop (mode, ior_optab, temp, const1_rtx,
                           NULL_RTX, 0, OPTAB_DIRECT);
      if (!temp)
        goto fail;
    }
  temp = expand_unop_direct (mode, clz_optab, temp, val ? NULL_RTX : target,
                             true);
  if (!temp)
    goto fail;
  if (val)
    {
      temp = expand_binop (mode, add_optab, temp, constm1_rtx,
                           target, 0, OPTAB_DIRECT);
      if (!temp)
        goto fail;
    }

  rtx_insn *seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CLRSB, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
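/* E.g. with 32-bit words, clz of the 64-bit value 2^33 is clz32 of the
   high word 2, i.e. 30, while clz of the 64-bit value 5 is 32 plus
   clz32 (5), i.e. 61.  */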
static rtx
expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
                       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */
static rtx
expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 0, mode), NULL_RTX,
                           true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 1, mode), NULL_RTX,
                           true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
  emit_insn (seq);
  return t;
}
/* Try calculating
	(parity:wide x)
   as
	(parity:narrow (low (x) ^ high (x)))  */
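/* This works because each pair of corresponding bits contributes
   low_i ^ high_i to the overall parity, so the parity of the double word
   equals the single-word parity of the xor of its two halves.  */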
static rtx
expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
                        operand_subword_force (op0, 0, mode),
                        operand_subword_force (op0, 1, mode),
                        NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
/* Try calculating bswap as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx x;
  rtx_insn *last;
  opt_scalar_int_mode wider_mode_iter;

  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    if (optab_handler (bswap_optab, wider_mode_iter.require ())
        != CODE_FOR_nothing)
      break;

  if (!wider_mode_iter.exists ())
    return NULL_RTX;

  scalar_int_mode wider_mode = wider_mode_iter.require ();
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_MODE_FROM (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
        {
          rtx xop0, temp;
          rtx_insn *last;

          last = get_last_insn ();

          if (target == 0 || GET_MODE (target) != wider_mode)
            target = gen_reg_rtx (wider_mode);

          xop0 = widen_operand (op0, wider_mode, mode, true, false);
          temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                              true);
          if (temp != 0)
            temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                 target, true, OPTAB_DIRECT);

          if (temp)
            {
              if (mclass != MODE_INT
                  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                return convert_to_mode (mode, temp, 0);
              else
                return gen_lowpart (mode, temp);
            }
          else
            delete_insns_since (last);
        }
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
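/* Worked example in QImode: for x = 0b00101000, x & -x = 0b00001000
   isolates the lowest set bit, clz of that is 4, and K - 4 = 7 - 4 = 3,
   which is indeed ctz (x).  */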
static rtx
expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
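/* In other words, ffs (x) = ctz (x) + 1 for nonzero x and ffs (0) = 0;
   the test and branch emitted below force the pre-increment value to -1
   when OP0 is zero, so the final add produces 0.  */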
static rtx
expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
                           machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
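/* For IEEE single precision the sign bit is bit 31, so negation is
   x ^ 0x80000000 and absolute value is x & 0x7fffffff; the MASK computed
   below generalizes this to any format with a simple sign bit.  */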
static rtx
expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  scalar_int_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || reg_overlap_mentioned_p (target, op0)
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_wide_int_const (mask, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_wide_int_const (mask, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
                        target);
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
                    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, ops[0].value,
                                   optab_to_code (unoptab),
                                   ops[1].value, NULL_RTX, mode))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return ops[0].value;
        }
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  scalar_float_mode float_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          temp = widen_leading (int_mode, op0, target, unoptab);
          if (temp)
            return temp;

          if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
              && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
            {
              temp = expand_doubleword_clz (int_mode, op0, target);
              if (temp)
                return temp;
            }
        }

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          temp = widen_leading (int_mode, op0, target, unoptab);
          if (temp)
            return temp;
          temp = expand_clrsb_using_clz (int_mode, op0, target);
          if (temp)
            return temp;
        }
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (int_mode, op0, target);
      if (temp)
        return temp;
    }

  if (unoptab == parity_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
          || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (int_mode, op0, target);
      if (temp)
        return temp;
    }
  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
         or ROTATERT.  First try these directly; if this fails, then try the
         obvious pair of shifts with allowed widening, as this will probably
         be always more efficient than the other fallback methods.  */
      if (mode == HImode)
        {
          rtx_insn *last;
          rtx temp1, temp2;

          if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotl_optab, op0,
                                   gen_int_shift_amount (mode, 8),
                                   target, unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotr_optab, op0,
                                   gen_int_shift_amount (mode, 8),
                                   target, unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          last = get_last_insn ();

          temp1 = expand_binop (mode, ashl_optab, op0,
                                gen_int_shift_amount (mode, 8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          temp2 = expand_binop (mode, lshr_optab, op0,
                                gen_int_shift_amount (mode, 8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          if (temp1 && temp2)
            {
              temp = expand_binop (mode, ior_optab, temp1, temp2, target,
                                   unsignedp, OPTAB_WIDEN);
              if (temp)
                return temp;
            }

          delete_insns_since (last);
        }

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          temp = widen_bswap (int_mode, op0, target);
          if (temp)
            return temp;

          /* We do not provide a 128-bit bswap in libgcc so force the use of
             a double bswap for 64-bit targets.  */
          if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
              && (UNITS_PER_WORD == 8
                  || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
            {
              temp = expand_doubleword_bswap (mode, op0, target);
              if (temp)
                return temp;
            }
        }

      goto try_libcall;
    }
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
          {
            rtx xop0 = op0;
            rtx_insn *last = get_last_insn ();

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */
            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && mclass == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0
          || target == op0
          || reg_overlap_mentioned_p (target, op0)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, int_mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }
  /* Emit ~op0 as op0 ^ -1.  */
  if (unoptab == one_cmpl_optab
      && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
      && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
    {
      temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
                           target, unsignedp, OPTAB_DIRECT);
      if (temp)
        return temp;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (is_a <scalar_float_mode> (mode, &float_mode))
        {
          temp = expand_absneg_bit (NEG, float_mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_parity (int_mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ffs (int_mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ctz (int_mode, op0, target);
      if (temp)
        return temp;
    }
 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == clrsb_optab || unoptab == popcount_optab
          || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
                                          optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
                                       op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
        eq_value = NULL_RTX;
      else
        {
          eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
          if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
            eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
          else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
            eq_value = simplify_gen_unary (ZERO_EXTEND,
                                           outmode, eq_value, mode);
        }
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
              || optab_libfunc (unoptab, wider_mode))
            {
              rtx xop0 = op0;
              rtx_insn *last = get_last_insn ();

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab
                                     || unoptab == bswap_optab)
                                    && mclass == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  Similarly for clrsb.  */
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
                  && temp != 0)
                {
                  scalar_int_mode wider_int_mode
                    = as_a <scalar_int_mode> (wider_mode);
                  int_mode = as_a <scalar_int_mode> (mode);
                  temp = expand_binop
                    (wider_mode, sub_optab, temp,
                     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
                                   - GET_MODE_PRECISION (int_mode),
                                   wider_int_mode),
                     target, true, OPTAB_DIRECT);
                }

              /* Likewise for bswap.  */
              if (unoptab == bswap_optab && temp != 0)
                {
                  scalar_int_mode wider_int_mode
                    = as_a <scalar_int_mode> (wider_mode);
                  int_mode = as_a <scalar_int_mode> (mode);
                  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
                              == GET_MODE_BITSIZE (wider_int_mode)
                              && GET_MODE_PRECISION (int_mode)
                                 == GET_MODE_BITSIZE (int_mode));

                  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
                                       GET_MODE_BITSIZE (wider_int_mode)
                                       - GET_MODE_BITSIZE (int_mode),
                                       NULL_RTX, true);
                }

              if (temp)
                {
                  if (mclass != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }
  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || !flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  scalar_float_mode float_mode;
  if (is_a <scalar_float_mode> (mode, &float_mode))
    {
      temp = expand_absneg_bit (ABS, float_mode, op0, target);
      if (temp != 0)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                          op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
                                   GET_MODE_PRECISION (int_mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (int_mode,
                             result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || !flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL, op1,
                           profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
                                   GET_MODE_PRECISION (int_mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */
static rtx
expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          if (!int_mode_for_mode (mode).exists (&imode))
            return NULL_RTX;
          op1 = gen_lowpart (imode, op1);
        }
      else
        {
          int word;

          imode = word_mode;
          if (FLOAT_WORDS_BIG_ENDIAN)
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
          else
            word = bitpos / BITS_PER_WORD;
          bitpos = bitpos % BITS_PER_WORD;
          op1 = operand_subword_force (op1, word, mode);
        }

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
                           immed_wide_int_const (mask, imode),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
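/* The identity implemented is

	copysign (x, y) = (x & ~signmask) | (y & signmask)

   where SIGNMASK has only the sign bit set; when OP0_IS_ABS the first
   AND is known to be a no-op and is skipped.  */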
static rtx
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || reg_overlap_mentioned_p (target, op0)
      || reg_overlap_mentioned_p (target, op1)
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece
                  = expand_binop (imode, and_optab, op0_piece,
                                  immed_wide_int_const (~mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_wide_int_const (mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_wide_int_const (mask, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_wide_int_const (~mask, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */
rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  scalar_float_mode mode;
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  mode = as_a <scalar_float_mode> (GET_MODE (op0));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */
bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
                      enum rtx_code code)
{
  class expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
                    GET_MODE (op0));

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}

struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};
4095 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
4096 the currently examined clobber / store has to stay in the list of
4097 insns that constitute the actual libcall block. */
4099 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
4101 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
4103 /* If this inns directly contributes to setting the target, it must stay. */
4104 if (reg_overlap_mentioned_p (p
->target
, dest
))
4105 p
->must_stay
= true;
4106 /* If we haven't committed to keeping any other insns in the list yet,
4107 there is nothing more to check. */
4108 else if (p
->insn
== p
->first
)
4110 /* If this insn sets / clobbers a register that feeds one of the insns
4111 already in the list, this insn has to stay too. */
4112 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
4113 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
4114 || reg_used_between_p (dest
, p
->first
, p
->insn
)
4115 /* Likewise if this insn depends on a register set by a previous
4116 insn in the list, or if it sets a result (presumably a hard
4117 register) that is set or clobbered by a previous insn.
4118 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4119 SET_DEST perform the former check on the address, and the latter
4120 check on the MEM. */
4121 || (GET_CODE (set
) == SET
4122 && (modified_in_p (SET_SRC (set
), p
->first
)
4123 || modified_in_p (SET_DEST (set
), p
->first
)
4124 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
4125 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
4126 p
->must_stay
= true;
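
/* Editorial illustration (not part of the original source): a
   hypothetical libcall block showing when must_stay triggers.  Suppose
   the block computes p->target == (reg 100):

     (set (reg 101) (const_int 5))          ;; feeds nothing kept yet:
                                            ;; may be hoisted out
     (set (reg 100) (plus (reg 101) ...))   ;; sets the target: must stay
     (set (reg 101) ...)                    ;; rewrites an input of a kept
                                            ;; insn: must stay too

   Only insns of the first kind are moved out of the block by
   emit_libcall_block_1 below.  */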
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
                      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
            if (note)
              {
                int lp_nr = INTVAL (XEXP (note, 0));
                if (lp_nr == 0 || lp_nr == INT_MIN)
                  remove_note (insn, note);
              }
          }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
         reg note to indicate that this call cannot throw or execute a nonlocal
         goto (unless there is already a REG_EH_REGION note, in which case
         we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (insn, no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                SET_PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

bool
can_compare_p (enum rtx_code code, machine_mode mode,
               enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        return true;
      if (purpose == ccp_store_flag
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 1, test))
        return true;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
        return true;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return false;
}
/* Return whether RTL code CODE corresponds to an unsigned optab.  */

static bool
unsigned_optab_p (enum rtx_code code)
{
  return code == LTU || code == LEU || code == GTU || code == GEU;
}
/* Return whether the backend-emitted comparison for code CODE, comparing
   operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
   operand OPNO of pattern ICODE.  */

static bool
insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
                          enum rtx_code code, machine_mode mask_mode,
                          machine_mode value_mode)
{
  rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
  return insn_operand_matches (icode, opno, test);
}
/* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
   for code CODE, comparing operands of mode VALUE_MODE and producing a result
   with MASK_MODE.  */

bool
can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
                       machine_mode mask_mode)
{
  enum insn_code icode
    = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
}
/* Return whether the backend can emit a vector comparison (vcond/vcondu) for
   code CODE, comparing operands of mode CMP_OP_MODE and producing a result
   with VALUE_MODE.  */

bool
can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
                     machine_mode cmp_op_mode)
{
  enum insn_code icode
    = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
}
/* Return whether the backend can emit vector set instructions for inserting
   element into vector at variable index position.  */

bool
can_vec_set_var_idx_p (machine_mode vec_mode)
{
  if (!VECTOR_MODE_P (vec_mode))
    return false;

  machine_mode inner_mode = GET_MODE_INNER (vec_mode);

  rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);

  enum insn_code icode = optab_handler (vec_set_optab, vec_mode);

  const struct insn_data_d *data = &insn_data[icode];
  machine_mode idx_mode = data->operand[2].mode;

  rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3);

  return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
         && insn_operand_matches (icode, 1, reg2)
         && insn_operand_matches (icode, 2, reg3);
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
                  int unsignedp, enum optab_methods methods,
                  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
              || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1))
      && can_create_pseudo_p ())
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1))
      && can_create_pseudo_p ())
    y = force_reg (mode, y);

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      opt_scalar_int_mode cmp_mode_iter;
      FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
        {
          scalar_int_mode cmp_mode = cmp_mode_iter.require ();
          cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if (CONST_INT_P (size)
              ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
              : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
                 > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
          *pmode = result_mode;
          return;
        }

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
        goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (x, y, size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (!can_create_pseudo_p () && (may_trap_p (x) || may_trap_p (y)))
        goto fail;
      if (may_trap_p (x))
        x = copy_to_reg (x);
      if (may_trap_p (y))
        y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      if (icode != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        {
          *ptest = test;
          return;
        }
      else
        goto fail;
    }

  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_WIDER_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        {
          rtx_insn *last = get_last_insn ();
          rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
          rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
          if (op0 && op1
              && insn_operand_matches (icode, 1, op0)
              && insn_operand_matches (icode, 2, op1))
            {
              XEXP (test, 0) = op0;
              XEXP (test, 1) = op1;
              *ptest = test;
              *pmode = cmp_mode;
              return;
            }
          delete_insns_since (last);
        }

      if (methods == OPTAB_DIRECT)
        break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
        {
          prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
                            ptest, pmode);
          if (*ptest)
            return;
        }

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp)
        {
          rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
          if (ulibfunc)
            libfunc = ulibfunc;
        }

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                        ret_mode, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent LTU.
         The comparisons in the fixed-point helper library are always
         biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
        {
          if (unsignedp)
            x = plus_constant (ret_mode, result, 1);
          else
            y = const0_rtx;
        }

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
                        ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
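
/* Editorial illustration (not part of the original source): a worked
   example of the biasing described above.  An unbiased routine returns
   -1/0/1 for x<y / x==y / x>y; a biased one returns 0/1/2.  After adding
   1 to an unbiased unsigned result, both conventions compare the call's
   result RES against 1 with the original comparison code:

     x <  y  ->  RES <  1   (RES is 0)
     x == y  ->  RES == 1   (RES is 1)
     x >  y  ->  RES >  1   (RES is 2)  */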
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
                 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
        return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
        return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
                          direct_optab cmp_optab, profile_probability prob,
                          bool test_branch)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cmp_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (test_branch || insn_operand_matches (icode, 0, test));
  if (test_branch)
    insn = emit_jump_insn (GEN_FCN (icode) (XEXP (test, 0),
                                            XEXP (test, 1), label));
  else
    insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
                                            XEXP (test, 1), label));

  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* PTEST points to a comparison that compares its first operand with zero.
   Check to see if it can be performed as a bit-test-and-branch instead.
   On success, return the instruction that performs the bit-test-and-branch
   and replace the second operand of *PTEST with the bit number to test.
   On failure, return CODE_FOR_nothing and leave *PTEST unchanged.

   Note that the comparison described by *PTEST should not be taken
   literally after a successful return.  *PTEST is just a convenient
   place to store the two operands of the bit-and-test.

   VAL must contain the original tree expression for the first operand
   of *PTEST.  */

static enum insn_code
validate_test_and_branch (tree val, rtx *ptest, machine_mode *pmode, optab *res)
{
  if (!val || TREE_CODE (val) != SSA_NAME)
    return CODE_FOR_nothing;

  machine_mode mode = TYPE_MODE (TREE_TYPE (val));
  rtx test = *ptest;
  direct_optab optab;

  if (GET_CODE (test) == EQ)
    optab = tbranch_eq_optab;
  else if (GET_CODE (test) == NE)
    optab = tbranch_ne_optab;
  else
    return CODE_FOR_nothing;

  *res = optab;

  /* If the target supports the testbit comparison directly, great.  */
  auto icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return icode;

  if (tree_zero_one_valued_p (val))
    {
      auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 : 0;
      XEXP (test, 1) = gen_int_mode (pos, mode);
      *ptest = test;
      *pmode = mode;
      return icode;
    }

  wide_int wcst = get_nonzero_bits (val);
  if (wcst == -1)
    return CODE_FOR_nothing;

  int bitpos;

  if ((bitpos = wi::exact_log2 (wcst)) == -1)
    return CODE_FOR_nothing;

  auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 - bitpos : bitpos;
  XEXP (test, 1) = gen_int_mode (pos, mode);
  *ptest = test;
  *pmode = mode;
  return icode;
}
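
/* Editorial illustration (not part of the original source): a small
   example of the transformation above.  For GIMPLE like

     _1 = x & 4;
     if (_1 != 0) goto L;

   get_nonzero_bits (_1) is 4 and wi::exact_log2 (4) is 2, so the NE
   comparison against zero becomes a tbranch_ne on bit 2 (or on bit
   GET_MODE_BITSIZE (mode) - 1 - 2 for a BITS_BIG_ENDIAN target).  */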
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  If the comparison is against
   zero then VAL contains the expression from which the non-zero RTL is
   derived.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, tree val, rtx label,
                         profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
                    &test, &mode);

  /* Check if we're comparing a truth type with 0, and if so check if
     the target supports tbranch.  */
  machine_mode tmode = mode;
  direct_optab optab;
  if (op1 == CONST0_RTX (GET_MODE (op1))
      && validate_test_and_branch (val, &test, &tmode,
                                   &optab) != CODE_FOR_nothing)
    {
      emit_cmp_and_jump_insn_1 (test, tmode, label, optab, prob, true);
      return;
    }

  emit_cmp_and_jump_insn_1 (test, mode, label, cbranch_optab, prob, false);
}

/* Overloaded version of emit_cmp_and_jump_insns in which VAL is unknown.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, rtx label,
                         profile_probability prob)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, NULL,
                           label, prob);
}
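
/* Editorial illustration (not part of the original source): the
   canonicalization above in action.  A request to branch on
   (GT (const_int 5) (reg x)) is emitted as (LT (reg x) (const_int 5))
   when the swapped comparison is supported, since canonical RTL wants
   the constant as the second operand; if the swapped form is not
   supported, the constant is forced into a register instead.  */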
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
                       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_WIDER_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
          && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
        break;

      if (code_to_optab (swapped)
          && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
        {
          std::swap (x, y);
          comparison = swapped;
          break;
        }

      if (code_to_optab (reversed)
          && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
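
/* Editorial illustration (not part of the original source): for
   comparison == UNORDERED the REG_EQUAL note built above encodes the
   usual NaN test, roughly

     unordered (x, y) == (x != x) ? true : (y != y)

   i.e. the operands are unordered iff at least one of them is a NaN,
   which lets the optimizers fold the libcall when X and Y are known
   at compile time.  */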
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      class expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, struct rtx_comparison comp,
                       rtx op2, rtx op3,
                       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
        target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (comp.op0, comp.op1))
    {
      std::swap (comp.op0, comp.op1);
      comp.code = swap_condition (comp.code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */

  if (comp.code == LT && comp.op1 == const1_rtx)
    comp.code = LE, comp.op1 = const0_rtx;
  else if (comp.code == GT && comp.op1 == constm1_rtx)
    comp.code = GE, comp.op1 = const0_rtx;

  if (comp.mode == VOIDmode)
    comp.mode = GET_MODE (comp.op0);

  enum rtx_code orig_code = comp.code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed =
           reversed_comparison_code_parts (comp.code, comp.op0, comp.op1, NULL))
          != UNKNOWN))
    {
      std::swap (op2, op3);
      comp.code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      comp.code = unsignedp ? unsigned_condition (comp.code) : comp.code;
      comparison =
        simplify_gen_relational (comp.code, VOIDmode,
                                 comp.mode, comp.op0, comp.op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
         punt and let the caller figure out how best to deal with this
         situation.  */
      if (COMPARISON_P (comparison))
        {
          saved_pending_stack_adjust save;
          save_pending_stack_adjust (&save);
          last = get_last_insn ();
          do_pending_stack_adjust ();
          machine_mode cmpmode = comp.mode;
          prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                            GET_CODE (comparison), NULL_RTX, unsignedp,
                            OPTAB_WIDEN, &comparison, &cmpmode);
          if (comparison)
            {
              rtx res = emit_conditional_move_1 (target, comparison,
                                                 op2, op3, mode);
              if (res != NULL_RTX)
                return res;
            }
          delete_insns_since (last);
          restore_pending_stack_adjust (&save);
        }

      if (pass == 1)
        return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
         operand order, perhaps it will expand successfully.  */
      if (swapped)
        comp.code = orig_code;
      else if ((reversed =
                reversed_comparison_code_parts (orig_code, comp.op0, comp.op1,
                                                NULL))
               != UNKNOWN)
        comp.code = reversed;
      else
        return NULL_RTX;
      std::swap (op2, op3);
    }
}
/* Helper function that, in addition to COMPARISON, also tries
   the reversed REV_COMPARISON with swapped OP2 and OP3.  As opposed
   to when we pass the specific constituents of a comparison, no
   additional insns are emitted for it.  It might still be necessary
   to emit more than one insn for the final conditional move, though.  */

rtx
emit_conditional_move (rtx target, rtx comparison, rtx rev_comparison,
                       rtx op2, rtx op3, machine_mode mode)
{
  rtx res = emit_conditional_move_1 (target, comparison, op2, op3, mode);

  if (res != NULL_RTX)
    return res;

  return emit_conditional_move_1 (target, rev_comparison, op3, op2, mode);
}
/* Helper for emitting a conditional move.  */

static rtx
emit_conditional_move_1 (rtx target, rtx comparison,
                         rtx op2, rtx op3, machine_mode mode)
{
  enum insn_code icode;

  if (comparison == NULL_RTX || !COMPARISON_P (comparison))
    return NULL_RTX;

  /* If the two source operands are identical, that's just a move.
     As the comparison comes in non-canonicalized, we must make
     sure not to discard any possible side effects.  If there are
     side effects, just let the target handle it.  */
  if (!side_effects_p (comparison) && rtx_equal_p (op2, op3))
    {
      if (!target)
        target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], comparison);
  create_input_operand (&ops[2], op2, mode);
  create_input_operand (&ops[3], op3, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
        convert_move (target, ops[0].value, false);
      return target;
    }

  return NULL_RTX;
}
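
/* Editorial illustration (not part of the original source): the movcc
   expansion above corresponds to a single target pattern of the shape

     (set (match_operand 0) (if_then_else (match_operand 1)
                                          (match_operand 2)
                                          (match_operand 3)))

   so ops[1] carries the whole comparison rtx while ops[2] and ops[3]
   are the two data inputs selected between.  */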
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COND is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
                                    machine_mode mode, rtx cond, rtx op1,
                                    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
        convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      machine_mode cmode, rtx op2, rtx op3,
                      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
                    &comparison, &cmode);
  if (comparison)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
        {
          if (ops[0].value != target)
            convert_move (target, ops[0].value, false);
          return target;
        }
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

bool
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}

/* Generate and return an insn body to add Y and Z,
   storing the result in X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

bool
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return false;

  return true;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract c from r1,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

bool
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
                 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && (significand_size (fmode)
                < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
        {
          scalar_mode fmode = fmode_iter.require ();
          if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
              && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
            break;
        }

      if (!fmode_iter.exists (&fmode))
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = to_mode;

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_PRECISION (from_mode))
            {
              rtx temp1;
              rtx_code_label *neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = from_mode;
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (targetm.gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
                               0, label);


      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
                           const_double_from_real_value (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
                                         GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
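
/* Editorial illustration (not part of the original source): a worked
   example of the (from & 1) | (from >> 1) trick above, for a 64-bit
   unsigned input converted via a signed conversion.  With
   from = 2^64 - 1 (all bits set):

     from >> 1              = 2^63 - 1  (now representable as signed)
     (from & 1) | (from>>1) = 2^63 - 1  (the shifted-out bit is folded
                                         back in, so it still acts as a
                                         sticky bit during rounding)
     converted result * 2   = 2^64      (the doubling restores the
                                         magnitude, correctly rounded)  */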
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            rtx from1 = from;
            if (fmode != GET_MODE (from))
              from1 = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from1));
                from1 = expand_unop (GET_MODE (from1), ftrunc_optab, from1,
                                     temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (maybe_emit_unop_insn (icode, target, from1,
                                      doing_unsigned ? UNSIGNED_FIX : FIX))
              {
                if (target != to)
                  convert_move (to, target, unsignedp);
                return;
              }
            delete_insns_since (last);
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for other inputs overflow happens and the result is undefined).
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
        scalar_mode fmode = fmode_iter.require ();
        if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
                                           0, &must_trunc)
            && (!DECIMAL_FLOAT_MODE_P (fmode)
                || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
          {
            int bitsize;
            REAL_VALUE_TYPE offset;
            rtx limit;
            rtx_code_label *lab1, *lab2;
            rtx_insn *insn;

            bitsize = GET_MODE_PRECISION (to_mode);
            real_2expN (&offset, bitsize - 1, fmode);
            limit = const_double_from_real_value (offset, fmode);
            lab1 = gen_label_rtx ();
            lab2 = gen_label_rtx ();

            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            /* See if we need to do the subtraction.  */
            do_pending_stack_adjust ();
            emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
                                     GET_MODE (from), 0, lab1);

            /* If not, do the signed "fix" and branch around fixup code.  */
            expand_fix (to, from, 0);
            emit_jump_insn (targetm.gen_jump (lab2));
            emit_barrier ();

            /* Otherwise, subtract 2**(N-1), convert to signed number,
               then add 2**(N-1).  Do the addition using XOR since this
               will often generate better code.  */
            emit_label (lab1);
            target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                   NULL_RTX, 0, OPTAB_LIB_WIDEN);
            expand_fix (to, target, 0);
            target = expand_binop (to_mode, xor_optab, to,
                                   gen_int_mode
                                   (HOST_WIDE_INT_1 << (bitsize - 1),
                                    to_mode),
                                   to, 1, OPTAB_LIB_WIDEN);

            if (target != to)
              emit_move_insn (to, target);

            emit_label (lab2);

            if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
              {
                /* Make a place for a REG_NOTE and add it.  */
                insn = emit_move_insn (to, to);
                set_dst_reg_note (insn, REG_EQUAL,
                                  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
                                                 copy_rtx (from)),
                                  to);
              }

            return;
          }
      }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
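
/* Editorial illustration (not part of the original source): a worked
   example of the unsigned-fix path above for DFmode -> DImode, N = 64.
   For an input value v in [2^63, 2^64-1]:

     v - 2^63       exact: it merely clears the 2^63 bit, so the
                    result now fits the signed conversion
     signed fix     yields v - 2^63 in [0, 2^63-1]
     xor 2^63       re-adds 2^63 without any risk of signed overflow,
                    giving the final unsigned result v  */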
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
         it here instead of inside emit_library_call_value because in
         emit_library_call_value we don't know if we should do a signed or
         unsigned promotion.  */

      int unsigned_p = uintp != 0;
      arg_mode = promote_function_mode (NULL_TREE, mode,
                                        &unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
        return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
                                   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
                      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        icode = convert_optab_handler (tab, imode, fmode,
                                       insn_optimization_type ());
        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
              {
                delete_insns_since (last);
                continue;
              }
            if (target != to)
              convert_move (to, target, 0);
            return true;
          }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

bool
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
          && (optab_handler (code_to_optab (code), mode)
              != CODE_FOR_nothing));
}
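
/* Editorial illustration (not part of the original source): a
   hypothetical use of have_insn_for from expansion code, asking whether
   the target has a rotate-left pattern before committing to that shape:

     if (have_insn_for (ROTATE, SImode))
       ... emit a ROTATE; otherwise fall back to shifts and an IOR ...  */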
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        rtx l = optab_libfunc ((optab) i, (machine_mode) j);
        if (l)
          {
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (optab_to_code ((optab) i)),
                     GET_MODE_NAME (j),
                     XSTR (l, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
                                         (machine_mode) k);
          if (l)
            {
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (optab_to_code ((optab) i)),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (l, 0));
            }
        }
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
                    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
                            tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE or UNKNOWN.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    case BIT_AND_EXPR:
      code = AND;
      break;

    case BIT_IOR_EXPR:
      code = IOR;
      break;

    default:
      code = UNKNOWN;
      break;
    }
  return code;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
  gcc_assert (code != UNKNOWN);
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
                    tree t_op0, tree t_op1, bool unsignedp,
                    enum insn_code icode, unsigned int opno)
{
  class expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
                         EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
                         EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand (for left shift
   first operand) is a constant vector of zeros.  Return the shift distance
   in bits if so, or NULL_RTX if the vec_perm is not a shift.  MODE is the
   mode of the value being shifted.  SHIFT_OPTAB is vec_shr_optab for right
   shift or vec_shl_optab for left shift.  */

static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
                             optab shift_optab)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (shift_optab == vec_shl_optab)
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
        return NULL_RTX;
      unsigned firstidx = 0;
      for (unsigned int i = 0; i < nelt; i++)
        {
          if (known_eq (sel[i], nelt))
            {
              if (i == 0 || firstidx)
                return NULL_RTX;
              firstidx = i;
            }
          else if (firstidx
                   ? maybe_ne (sel[i], nelt + i - firstidx)
                   : maybe_ge (sel[i], nelt))
            return NULL_RTX;
        }

      if (firstidx == 0)
        return NULL_RTX;
      first = firstidx;
    }
  else if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
        return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
        {
          poly_int64 expected = i + first;
          /* Indices into the second vector are all equivalent.  */
          if (maybe_lt (sel[i], nelt)
              ? maybe_ne (sel[i], expected)
              : maybe_lt (expected, nelt))
            return NULL_RTX;
        }
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
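
/* Editorial illustration (not part of the original source): an example
   of the mapping above.  On a little-endian V4SI target, the
   permutation { 1, 2, 3, 4 } of (v, zero-vector) selects elements 1..3
   of V followed by one zero element, i.e. a whole-vector shift right,
   so the function returns a shift amount of 1 * 32 bits.  */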
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
                   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  class expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
              || related_int_vector_mode (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
        v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
                       const vec_perm_builder &sel, machine_mode sel_mode,
                       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr or vec_shl.  We only do this
     if the second (for vec_shr) or first (for vec_shl) vector is all
     zeroes.  */
  insn_code shift_code = CODE_FOR_nothing;
  insn_code shift_code_qi = CODE_FOR_nothing;
  optab shift_optab = unknown_optab;
  rtx v2 = v0;
  if (v1 == CONST0_RTX (GET_MODE (v1)))
    shift_optab = vec_shr_optab;
  else if (v0 == CONST0_RTX (GET_MODE (v0)))
    {
      shift_optab = vec_shl_optab;
      v2 = v1;
    }
  if (shift_optab != unknown_optab)
    {
      shift_code = optab_handler (shift_optab, mode);
      shift_code_qi = ((qimode != VOIDmode && qimode != mode)
                       ? optab_handler (shift_optab, qimode)
                       : CODE_FOR_nothing);
    }
  if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
      if (shift_amt)
        {
          class expand_operand ops[3];
          if (shift_amt == const0_rtx)
            return v2;
          if (shift_code != CODE_FOR_nothing)
            {
              create_output_operand (&ops[0], target, mode);
              create_input_operand (&ops[1], v2, mode);
              create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
              if (maybe_expand_insn (shift_code, 3, ops))
                return ops[0].value;
            }
          if (shift_code_qi != CODE_FOR_nothing)
            {
              rtx tmp = gen_reg_rtx (qimode);
              create_output_operand (&ops[0], tmp, qimode);
              create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
              create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
              if (maybe_expand_insn (shift_code_qi, 3, ops))
                return gen_lowpart (mode, ops[0].value);
            }
        }
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      if (single_arg_p)
        v1 = v0;

      gcc_checking_assert (GET_MODE (v0) == GET_MODE (v1));
      machine_mode op_mode = GET_MODE (v0);
      if (targetm.vectorize.vec_perm_const (mode, op_mode, target, v0, v1,
                                            indices))
        return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
          && targetm.vectorize.vec_perm_const (qimode, qimode, target_qi,
                                               v0_qi, v1_qi, qimode_indices))
        return gen_lowpart (mode, target_qi);
    }

  v0 = force_reg (mode, v0);
  if (single_arg_p)
    v1 = v0;
  v1 = force_reg (mode, v1);

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!related_int_vector_mode (mode).exists (&required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
        return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
        {
          rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
          rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
          if (tmp)
            return gen_lowpart (mode, tmp);
        }
    }

  delete_insns_since (last);
  return NULL_RTX;
}
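
/* To illustrate the byte-based fallback: a V4SI selector { 1, 0, 3, 2 }
   is expanded by new_expanded_vector with a unit size of 4 into the
   V16QI selector { 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }, with the
   inputs rewritten as V16QI lowparts of the original vectors.  */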
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
        return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
                               NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
                               gen_int_shift_amount (selmode, exact_log2 (u)),
                               NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
                                sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
                           gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
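
/* Rough sketch of this lowering for V4SI (U == 4) on a little-endian
   target: a variable selector element holding 3 is first scaled to 12
   (3 << log2 (4)); that element's low byte is then broadcast across its
   four bytes, giving { 12, 12, 12, 12 }; finally the constant offsets
   { 0, 1, 2, 3 } are added, producing the byte selector { 12, 13, 14, 15 }
   used for the QImode permute.  */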
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  class expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  class expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
        icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
        return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
                                   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
                      rtx target, bool uns_p)
{
  class expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
                           OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
        std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
                                 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
        sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
                        + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
        sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
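
/* Roughly, for method 2 on a little-endian V4SI target: M1 holds the
   even and M2 the odd widened products, each DImode product occupying
   two SImode lanes with the high half in the odd lane.  The selector
   built above comes out as { 1, 5, 3, 7 }, which picks the high half of
   every product, i.e. exactly the highpart multiply result.  */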
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
        if (!success)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
                                       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
                                       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
                           GET_MODE (success), 1, label,
                           profile_probability::guessed_never ());
  return true;
}
/* This function tries to emit an atomic_exchange instruction.  VAL is
   written to *MEM using memory model MODEL.  The previous contents of
   *MEM are returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
        return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
                                   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
        {
          rtx addr;

          addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                          mode, addr, ptr_mode,
                                          val, mode);
        }
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  class expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
                                           MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx,
                                             model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
        emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
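
/* E.g. on a target whose atomic_test_and_set_trueval is 0xff, the
   exchange above stores 0xff, and the final emit_store_flag_force
   reduces the previous contents to the 0/1 boolean that
   __atomic_test_and_set is documented to return.  */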
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
                                rtx mem, rtx expected, rtx desired,
                                bool is_weak, enum memmodel succ_model,
                                enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
        ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
          || (target_bool = *ptarget_bool) == NULL
          || GET_MODE (target_bool) != bool_mode)
        target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
        {
          /* Return success/failure.  */
          target_bool = ops[0].value;
          target_oval = ops[1].value;
          goto success;
        }
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
        return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
         skip the computation of it.  */
      if (ptarget_bool == NULL)
        goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
        note_stores (get_last_insn (), find_cc_set, &cc_reg);
      if (cc_reg)
        {
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
                                               const0_rtx, VOIDmode, 0, 1);
          goto success;
        }
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
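
/* A typical use from the builtin expanders, sketched for illustration:

     rtx b = NULL_RTX, v = NULL_RTX;
     if (expand_atomic_compare_and_swap (&b, &v, mem, expected, desired,
                                         is_weak, succ, fail))
       ... B holds the success flag, V the old memory contents ...

   with either output pointer passed as NULL (or pointing at const0_rtx)
   when that particular result is unused.  */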
/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
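
/* The emitted rtl is roughly

     (parallel [(asm_operands ("") ("") 0 [] [] [] ...)
                (clobber (mem:BLK (scratch)))])

   i.e. a volatile asm with an attached wildcard memory clobber, which is
   what stops the rtl optimizers from moving memory accesses across it.  */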
/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}
/* Generate asm volatile("" : : : "memory") as a memory blockage, at the
   same time clobbering the register set specified by REGS.  */

void
expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
{
  rtx asm_op, clob_mem;

  unsigned int num_of_regs = 0;
  for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (regs, i))
      num_of_regs++;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  rtvec v = rtvec_alloc (num_of_regs + 2);

  clob_mem = gen_rtx_SCRATCH (VOIDmode);
  clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
  clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);

  RTVEC_ELT (v, 0) = asm_op;
  RTVEC_ELT (v, 1) = clob_mem;

  if (num_of_regs > 0)
    {
      unsigned int j = 2;
      for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (regs, i))
          {
            RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
            j++;
          }
      gcc_assert (j == (num_of_regs + 2));
    }

  emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
}
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}
/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
        expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        {
          if (!is_mm_relaxed (model))
            expand_memory_blockage ();
          return ops[0].value;
        }
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store this
     would involve could be incorrect if this is a volatile atomic load
     or if we are targeting read-only-mapped memory.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  class expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
        expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        {
          if (is_mm_seq_cst (model))
            expand_memory_blockage ();
          return const0_rtx;
        }
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
         we can try an atomic_exchange and throw away the result.  Otherwise,
         don't do anything so that we do not create an inconsistency between
         loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
        {
          rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
          if (!target)
            target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
                                                                val);
          if (target)
            return const0_rtx;
        }
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
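
/* So for a SEQ_CST __atomic_store on a target without atomic_store_optab,
   the fallback sequence is effectively

     expand_mem_thread_fence (MEMMODEL_SEQ_CST);
     emit_move_insn (mem, val);
     expand_mem_thread_fence (MEMMODEL_SEQ_CST);

   i.e. a full fence on both sides of a plain word-sized store.  */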
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
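
/* For example, a caller expanding __atomic_fetch_add does

     struct atomic_op_functions optab;
     get_atomic_op_for_code (&optab, PLUS);

   and then probes optab.mem_fetch_before (atomic_fetch_add_optab) first,
   with optab.reverse_code == MINUS available to reconstruct the fetched
   value if an add_fetch pattern is all the target provides.  */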
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
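
/* The compensation step in concrete terms: if only an add_fetch pattern
   exists, fetch_add is recovered by subtracting VAL from the value after
   the addition:

     result = add_fetch (mem, val);
     result = result - val;

   while a missing nand_fetch is recomputed forwards from the fetched
   value as ~(result & val), since NAND has no reverse operation.  */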
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);

  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          tmp = get_insns ();
          end_sequence ();
          emit_insn (tmp);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i, size;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (class expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
                         gen_int_mode (intval, MAX_MODE_INT),
                         VOIDmode, false, intval);
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    class expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;
      rtx_insn *last;
      machine_mode mode;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          class expand_operand *op)
{
  machine_mode mode, imode, tmode;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      {
        temporary_volatile_ok v (true);
        return maybe_legitimize_operand_same_code (icode, opno, op);
      }

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
               ? GET_MODE_INNER (imode) : imode);
      if (tmode != VOIDmode && tmode != mode)
        {
          op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
          mode = tmode;
        }
      if (imode != VOIDmode && imode != mode)
        {
          gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
          op->value = expand_vector_broadcast (imode, op->value);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
                                          op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
          && known_eq (trunc_int_for_mode (op->int_value, mode),
                       op->int_value))
        {
          op->value = gen_int_mode (op->int_value, mode);
          goto input;
        }
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (class expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}
/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static bool
can_reuse_operands_p (enum insn_code icode,
                      unsigned int opno1, unsigned int opno2,
                      const class expand_operand *op1,
                      const class expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
          != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
      /* Outputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, class expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
         This avoids duplicate rtl and ensures that tied operands
         remain tied.

         This search is linear, but NOPS is bounded at compile time
         to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
        if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
            && rtx_equal_p (orig_values[j], orig_values[i])
            && ops[j].value
            && insn_operand_matches (icode, opno + i, ops[j].value))
          {
            ops[i].value = copy_rtx (ops[j].value);
            break;
          }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
        {
          delete_insns_since (last);
          return false;
        }
    }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                class expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 0:
      return GEN_FCN (icode) ();
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             class expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  class expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}