/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
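/* For example, when a multiplication has been expanded as a multi-insn
   sequence, attaching REG_EQUAL (mult OP0 OP1) to the final insn records
   the overall result, letting passes such as CSE treat the sequence as a
   single operation.  */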
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
	  {
	    note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
	    if (GET_MODE_UNIT_SIZE (op0_mode)
		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, op0_mode);
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, op0_mode);
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
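/* For instance, an AND widened from 32 to 64 bits can use NO_EXTEND:
   whatever ends up in the upper 32 bits of the inputs, the result is
   truncated back to 32 bits anyway.  A right shift, by contrast, pulls
   the high-order input bits down into the visible result, so its first
   operand must be genuinely extended.  */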
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:

   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops  OP0     OP1     WIDE_OP
   widening-sum                  2     oprnd0  -       oprnd1
   widening-dot-product          3     oprnd0  oprnd1  oprnd2
   widening-mult                 2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1     oprnd0  -       -  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
	    || ops->code == VEC_UNPACK_LO_EXPR)
	   && VECTOR_BOOLEAN_TYPE_P (ops->type)
	   && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
	   && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
	   && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
	 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
	 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
	 the pattern number of elements in the wider vector.  */
      widen_pattern_optab
	= (ops->code == VEC_UNPACK_HI_EXPR
	   ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
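/* E.g. broadcasting a constant OP into V4SImode simply yields
   (const_vector:V4SI [OP OP OP OP]); a variable OP goes through the
   target's vec_duplicate or vec_init expanders instead.  */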
rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
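/* For example, in a doubleword logical right shift by OP1 >= BITS_PER_WORD,
   INTO_TARGET receives OUTOF_INPUT >> (OP1 - BITS_PER_WORD) -- the caller
   arranges for SUPERWORD_OP1 to have that effective value -- while
   OUTOF_TARGET becomes zero (or copies of the sign bit for an arithmetic
   right shift).  */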
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
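  /* E.g. for a doubleword left shift by a subword count OP1, with 32-bit
     words: INTO_TARGET = (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1)),
     and the (32 - OP1) "reverse" shift is what is computed here.  */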
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
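/* The strategy below: if the comparison of OP1 against BITS_PER_WORD can be
   evaluated at compile time, call the appropriate subroutine directly;
   otherwise try branch-free code using conditional moves, and fall back to
   a compare-and-branch between the subword and superword sequences.  */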
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   NULL_RTX, NULL, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			       [__op0_high_|__op0_low__]
			       _______________________
	*		       [__op1_high_|__op1_low__]
	_______________________________________________

			       _______________________
    (1)			       [__op0_low__*__op1_low__]
		       _______________________
    (2a)	       [__op0_low__*__op1_high_]
		       _______________________
    (2b)	       [__op0_high_*__op1_low__]
	       _______________________
    (3)	       [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
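/* A small worked example, assuming BITS_PER_WORD == 8: for low words
   op0_low = 0x80 and op1_low = 0x02, a signed widening multiply computes
   (-128) * 2 = 0xff00, while the desired unsigned product is 0x0100.
   The logically-shifted sign bit of op0_low (0x80 >> 7 == 1), added into
   op0_high before partial product (2b) is formed, contributes an extra
   op1_low * 2**8 = 0x0200, exactly the required correction.  */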
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different from the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value, mode0))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
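/* The strategies below are tried roughly in order of preference: a single
   insn via expand_binop_directly, rotating in the opposite direction, a
   widening multiply, a vector-by-vector shift, open-coding in a wider mode,
   word-at-a-time synthesis, a library call, and finally widening combined
   with a library call.  */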
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */
  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
	{
	  machine_mode from_mode = widened_mode (mode, op0, op1);
	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
	}
      else
	icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
					target, unsignedp, methods, last);
	  if (temp)
	    return temp;
	}
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
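  /* (Rotating left by N bits is equivalent to rotating right by
     BITS - N, where BITS is the precision of the mode, so only the
     shift count needs adjusting.)  */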
  if (((binoptab == rotl_optab
	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
	 one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
	op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
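  /* E.g. a V4SI shift by the scalar 3 can be performed as a shift by the
     vector {3, 3, 3, 3} when only the vector-by-vector shift pattern is
     implemented by the target.  */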
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab != unknown_optab
	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
	op0_mode = int_mode;
      if (op1_mode == VOIDmode)
	op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, op0_mode),
				operand_subword_force (op1, i, op1_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, int_mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

	  outof_input = operand_subword_force (op0, outof_word, int_mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }

  /* Synthesize double word rotates from single word shifts.  */
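  /* A rotate by exactly BITS_PER_WORD is just a swap of the two words.
     Other constant counts combine two single-word shifts per result word:
     for instance, for a left rotate by C < BITS_PER_WORD each result word
     is (one input word << C) | (the other input word
     >> (BITS_PER_WORD - C)).  */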
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  HOST_WIDE_INT first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = shift_count - BITS_PER_WORD;
	      second_shift_count = 2 * BITS_PER_WORD - shift_count;
	    }
	  else
	    {
	      first_shift_count = BITS_PER_WORD - shift_count;
	      second_shift_count = shift_count;
	    }
	  rtx first_shift_count_rtx
	    = gen_int_shift_amount (word_mode, first_shift_count);
	  rtx second_shift_count_rtx
	    = gen_int_shift_amount (word_mode, second_shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
	  rtx op0_piece = operand_subword_force (xop0, index, int_mode);
	  rtx op1_piece = operand_subword_force (xop1, index, int_mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two possible carries together.  */
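		  /* (In the add case at most one of the two can actually be
		     set: once the main add has wrapped, its truncated sum is
		     small enough that adding the carry cannot wrap again, so
		     an inclusive OR is sufficient to combine them.)  */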
1715 carry_out
= expand_binop (word_mode
, ior_optab
,
1716 carry_out
, carry_tmp
,
1717 carry_out
, 0, next_methods
);
1721 emit_move_insn (target_piece
, newx
);
1725 if (x
!= target_piece
)
1726 emit_move_insn (target_piece
, x
);
1729 carry_in
= carry_out
;
1732 if (i
== GET_MODE_BITSIZE (int_mode
) / (unsigned) BITS_PER_WORD
)
1734 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
1735 || ! rtx_equal_p (target
, xtarget
))
1737 rtx_insn
*temp
= emit_move_insn (target
, xtarget
);
1739 set_dst_reg_note (temp
, REG_EQUAL
,
1740 gen_rtx_fmt_ee (optab_to_code (binoptab
),
1741 int_mode
, copy_rtx (xop0
),
1752 delete_insns_since (last
);
1755 /* Attempt to synthesize double word multiplies using a sequence of word
1756 mode multiplications. We first attempt to generate a sequence using a
1757 more efficient unsigned widening multiply, and if that fails we then
1758 try using a signed widening multiply. */
1760 if (binoptab
== smul_optab
1761 && is_int_mode (mode
, &int_mode
)
1762 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
1763 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
1764 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
1766 rtx product
= NULL_RTX
;
1767 if (convert_optab_handler (umul_widen_optab
, int_mode
, word_mode
)
1768 != CODE_FOR_nothing
)
1770 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
1773 delete_insns_since (last
);
1776 if (product
== NULL_RTX
1777 && (convert_optab_handler (smul_widen_optab
, int_mode
, word_mode
)
1778 != CODE_FOR_nothing
))
1780 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
1783 delete_insns_since (last
);
1786 if (product
!= NULL_RTX
)
1788 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
)
1790 rtx_insn
*move
= emit_move_insn (target
? target
: product
,
1792 set_dst_reg_note (move
,
1794 gen_rtx_fmt_ee (MULT
, int_mode
,
1797 target
? target
: product
);
1803 /* It can't be open-coded in this mode.
1804 Use a library call if one is available and caller says that's ok. */
1806 libfunc
= optab_libfunc (binoptab
, mode
);
1808 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1812 machine_mode op1_mode
= mode
;
1817 if (shift_optab_p (binoptab
))
1819 op1_mode
= targetm
.libgcc_shift_count_mode ();
1820 /* Specify unsigned here,
1821 since negative shift counts are meaningless. */
1822 op1x
= convert_to_mode (op1_mode
, op1
, 1);
1825 if (GET_MODE (op0
) != VOIDmode
1826 && GET_MODE (op0
) != mode
)
1827 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1829 /* Pass 1 for NO_QUEUE so we don't lose any increments
1830 if the libcall is cse'd or moved. */
1831 value
= emit_library_call_value (libfunc
,
1832 NULL_RTX
, LCT_CONST
, mode
,
1833 op0
, mode
, op1x
, op1_mode
);
1835 insns
= get_insns ();
1838 bool trapv
= trapv_binoptab_p (binoptab
);
1839 target
= gen_reg_rtx (mode
);
1840 emit_libcall_block_1 (insns
, target
, value
,
1842 : gen_rtx_fmt_ee (optab_to_code (binoptab
),
1843 mode
, op0
, op1
), trapv
);
1848 delete_insns_since (last
);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      /* This code doesn't make sense for conversion optabs, since we
	 wouldn't then want to extend the operands to be the same size
	 as the result.  */
      gcc_assert (!convert_optab_p (binoptab));
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode)
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
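/* As an illustration of the widening path above: on a target with only
   SImode addition, a QImode add is expanded roughly as

	SImode t = (SImode) a + (SImode) b;	(extensions may be elided)
	result = lowpart (QImode, t);

   The extensions can be skipped for add/sub/mul/and/ior/xor and left
   shifts because the low bits of those results do not depend on the high
   bits of the inputs.  */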
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
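/* The signed-for-unsigned widening above is valid because a zero-extended
   operand is always non-negative in the wider mode: e.g. a signed wide
   division of two zero-extended values produces the same result as the
   unsigned narrow division would.  */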
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
      create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
	return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
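/* For instance, a divmod-style helper fits this scheme: it would return
   quotient and remainder packed into one value of twice the operand
   width, and the subreg offset of 0 or GET_MODE_SIZE (mode) above picks
   out whichever half the caller asked for.  */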
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}

/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
static rtx
widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
{
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0)
	    target = gen_reg_rtx (mode);
	  xop0 = widen_operand (op0, wider_mode, mode,
				unoptab != clrsb_optab, false);
	  temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
			      unoptab != clrsb_optab);
	  if (temp != 0)
	    temp = expand_binop
	      (wider_mode, sub_optab, temp,
	       gen_int_mode (GET_MODE_PRECISION (wider_mode)
			     - GET_MODE_PRECISION (mode),
			     wider_mode),
	       target, true, OPTAB_DIRECT);
	  if (temp == 0)
	    delete_insns_since (last);

	  return temp;
	}
    }
  return 0;
}
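/* Example: clz on a QImode value using an SImode clz instruction is
   clz32 (zero_extend (x)) - (32 - 8), since the zero extension
   contributes exactly 24 extra leading zeros.  For clrsb the operand is
   sign-extended instead, which likewise adds (wide - narrow) redundant
   sign bits to the count.  */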
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
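/* Concretely, for a 128-bit clz built from 64-bit words this computes
   clz128 (x) = hi != 0 ? clz64 (hi) : 64 + clz64 (lo).  */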
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */
static rtx
expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 0, mode), NULL_RTX,
			   true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 1, mode), NULL_RTX,
			   true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
  emit_insn (seq);
  return t;
}

/* Try calculating
	(parity:wide x)
   as
	(parity:narrow (low (x) ^ high (x)))  */
static rtx
expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
			operand_subword_force (op0, 0, mode),
			operand_subword_force (op0, 1, mode),
			NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
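/* This works because XOR preserves the parity of the total population
   count: parity (hi:lo) == parity (hi ^ lo).  */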
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx x;
  rtx_insn *last;
  opt_scalar_int_mode wider_mode_iter;

  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    if (optab_handler (bswap_optab, wider_mode_iter.require ())
	!= CODE_FOR_nothing)
      break;

  if (!wider_mode_iter.exists ())
    return NULL_RTX;

  scalar_int_mode wider_mode = wider_mode_iter.require ();
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      GET_MODE_BITSIZE (wider_mode)
		      - GET_MODE_BITSIZE (mode),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */
static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_MODE_FROM (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0 || GET_MODE (target) != wider_mode)
	    target = gen_reg_rtx (wider_mode);

	  xop0 = widen_operand (op0, wider_mode, mode, true, false);
	  temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
			      true);
	  if (temp != 0)
	    temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				 target, true, OPTAB_DIRECT);

	  if (temp)
	    {
	      if (mclass != MODE_INT
		  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		return convert_to_mode (mode, temp, 0);
	      else
		return gen_lowpart (mode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is the
   operand precision (K + 1), the result of this code sequence will be
   -1; expand_ffs, below, relies on this.  It might be nice to have it
   be K instead, for consistency with the (very few) processors that
   provide a ctz with a defined value, but that would take one more
   instruction, and it would be less convenient for expand_ffs
   anyway.  */

static rtx
expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
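/* Worked example in QImode (K = 7): for x = 0b00010100,
   x & -x = 0b00000100, clz gives 5, and 7 - 5 = 2 == ctz (x).  For
   x = 0, x & -x = 0, so a clz that returns the operand width at zero
   makes the sequence yield -1.  */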
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
static rtx
expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
			   machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  scalar_int_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
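/* E.g. for IEEE single precision the sign is bit 31, so this computes
   neg as x ^ 0x80000000 and abs as x & 0x7fffffff on the integer image
   of the value.  */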
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX, mode))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  scalar_float_mode float_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_clz (int_mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;
	}
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (int_mode, op0, target);
      if (temp)
	return temp;
    }

  if (unoptab == parity_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  temp1 = expand_binop (mode, ashl_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_bswap (int_mode, op0, target);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_bswap (mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */
	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, int_mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  /* Emit ~op0 as op0 ^ -1.  */
  if (unoptab == one_cmpl_optab
      && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
      && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
    {
      temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
			   target, unsignedp, OPTAB_DIRECT);
      if (temp)
	return temp;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (is_a <scalar_float_mode> (mode, &float_mode))
	{
	  temp = expand_absneg_bit (NEG, float_mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ffs (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ctz (int_mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
	eq_value = NULL_RTX;
      else
	{
	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
	  if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
	  else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (ZERO_EXTEND,
					   outmode, eq_value, mode);
	}
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  temp = expand_binop
		    (wider_mode, sub_optab, temp,
		     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
				   - GET_MODE_PRECISION (int_mode),
				   wider_mode),
		     target, true, OPTAB_DIRECT);
		}

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
			      == GET_MODE_BITSIZE (wider_int_mode)
			      && GET_MODE_PRECISION (int_mode)
				 == GET_MODE_BITSIZE (int_mode));

		  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
				       GET_MODE_BITSIZE (wider_int_mode)
				       - GET_MODE_BITSIZE (int_mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  scalar_float_mode float_mode;
  if (is_a <scalar_float_mode> (mode, &float_mode))
    {
      temp = expand_absneg_bit (ABS, float_mode, op0, target);
      if (temp != 0)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (int_mode,
			     result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
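/* Worked example of the branchless form, W = 32, x = -5:
   mask = x >> 31 = -1; (x ^ mask) = 4; 4 - mask = 5 = abs (x).
   For non-negative x the mask is 0 and both steps are no-ops.  */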
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL, op1,
			   profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  if (!int_mode_for_mode (mode).exists (&imode))
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;

      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_wide_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  scalar_float_mode mode;
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  mode = as_a <scalar_float_mode> (GET_MODE (op0));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  class expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
		    GET_MODE (op0));

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (insn, no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* Return whether the backend can emit a vector comparison for code CODE,
   comparing operands of mode CMP_OP_MODE and producing a result with
   VALUE_MODE.  */

bool
can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
		     machine_mode cmp_op_mode)
{
  enum insn_code icode;
  bool unsigned_p = (code == LTU || code == LEU || code == GTU || code == GEU);
  rtx reg1 = alloca_raw_REG (cmp_op_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (cmp_op_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx test = alloca_rtx_fmt_ee (code, value_mode, reg1, reg2);

  return (icode = get_vcond_icode (value_mode, cmp_op_mode, unsigned_p))
	 != CODE_FOR_nothing
	 && insn_operand_matches (icode, 3, test);
}
3841 /* This function is called when we are going to emit a compare instruction that
3842 compares the values found in X and Y, using the rtl operator COMPARISON.
3844 If they have mode BLKmode, then SIZE specifies the size of both operands.
3846 UNSIGNEDP nonzero says that the operands are unsigned;
3847 this matters if they need to be widened (as given by METHODS).
3849 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3850 if we failed to produce one.
3852 *PMODE is the mode of the inputs (in case they are const_int).
3854 This function performs all the setup necessary so that the caller only has
3855 to emit a single comparison insn. This setup can involve doing a BLKmode
3856 comparison or emitting a library call to perform the comparison if no insn
3857 is available to handle it.
3858 The values which are passed in through pointers can be modified; the caller
3859 should perform the comparison on the modified values. Constant
3860 comparisons must have already been folded. */
3863 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3864 int unsignedp
, enum optab_methods methods
,
3865 rtx
*ptest
, machine_mode
*pmode
)
3867 machine_mode mode
= *pmode
;
3869 machine_mode cmp_mode
;
3870 enum mode_class mclass
;
3872 /* The other methods are not needed. */
3873 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
3874 || methods
== OPTAB_LIB_WIDEN
);
3876 if (CONST_SCALAR_INT_P (y
))
3877 canonicalize_comparison (mode
, &comparison
, &y
);
3879 /* If we are optimizing, force expensive constants into a register. */
3880 if (CONSTANT_P (x
) && optimize
3881 && (rtx_cost (x
, mode
, COMPARE
, 0, optimize_insn_for_speed_p ())
3882 > COSTS_N_INSNS (1)))
3883 x
= force_reg (mode
, x
);
3885 if (CONSTANT_P (y
) && optimize
3886 && (rtx_cost (y
, mode
, COMPARE
, 1, optimize_insn_for_speed_p ())
3887 > COSTS_N_INSNS (1)))
3888 y
= force_reg (mode
, y
);
3891 /* Make sure if we have a canonical comparison. The RTL
3892 documentation states that canonical comparisons are required only
3893 for targets which have cc0. */
3894 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3897 /* Don't let both operands fail to indicate the mode. */
3898 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3899 x
= force_reg (mode
, x
);
3900 if (mode
== VOIDmode
)
3901 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
3903 /* Handle all BLKmode compares. */
3905 if (mode
== BLKmode
)
3907 machine_mode result_mode
;
3908 enum insn_code cmp_code
;
3911 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3915 /* Try to use a memory block compare insn - either cmpstr
3916 or cmpmem will do. */
3917 opt_scalar_int_mode cmp_mode_iter
;
3918 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter
, MODE_INT
)
3920 scalar_int_mode cmp_mode
= cmp_mode_iter
.require ();
3921 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
3922 if (cmp_code
== CODE_FOR_nothing
)
3923 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
3924 if (cmp_code
== CODE_FOR_nothing
)
3925 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
3926 if (cmp_code
== CODE_FOR_nothing
)
3929 /* Must make sure the size fits the insn's mode. */
3930 if (CONST_INT_P (size
)
3931 ? UINTVAL (size
) > GET_MODE_MASK (cmp_mode
)
3932 : (GET_MODE_BITSIZE (as_a
<scalar_int_mode
> (GET_MODE (size
)))
3933 > GET_MODE_BITSIZE (cmp_mode
)))
3936 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3937 result
= gen_reg_rtx (result_mode
);
3938 size
= convert_to_mode (cmp_mode
, size
, 1);
3939 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3941 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
3942 *pmode
= result_mode
;
3946 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
3949 /* Otherwise call a library function. */
3950 result
= emit_block_comp_via_libcall (x
, y
, size
);
3954 mode
= TYPE_MODE (integer_type_node
);
3955 methods
= OPTAB_LIB_WIDEN
;
  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
		  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
	{
	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
			    ptest, pmode);
	  if (*ptest)
	    return;
	}

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.
	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
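
/* As an illustration (a sketch, not compiled code): with a biased
   helper such as libgcc's __cmpdi2, which returns 0/1/2 for
   less/equal/greater, "x < y" expands to

       result = __cmpdi2 (x, y);
       if (result < 1) ...

   while an unbiased helper returning -1/0/1 is tested against zero
   instead.  For unsigned comparisons on an unbiased target the result
   is first rebiased by adding 1, so the test against 1 works for both
   kinds of routine.  The actual libfunc comes from cmp_optab or
   ucmp_optab; __cmpdi2 above is just the usual libgcc instance.  */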
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
		 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
	return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
	return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
			  profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					  XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, rtx label,
			 profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
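
/* As an illustration, a typical caller guards a block like so (a
   sketch, assuming SImode operands):

       rtx_code_label *skip = gen_label_rtx ();
       emit_cmp_and_jump_insns (op0, op1, GE, NULL_RTX, SImode, 0,
				skip, profile_probability::uninitialized ());
       ... insns executed only when op0 < op1 ...
       emit_label (skip);

   The branch is taken when the comparison is true, so the fall-through
   code runs on the opposite condition.  */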
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  std::swap (x, y);
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
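
/* As an illustration: for "x UNORDERED y" the REG_EQUAL equivalence
   built above is

       (if_then_else (ne x x) const_true (ne y y))

   which is true exactly when either operand is a NaN -- the same value
   the libcall computes.  When only the reversed comparison had a
   libfunc (reversed_p), the final test is flipped from NE to EQ against
   FALSE_RTX so callers still observe the original COMPARISON.  */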
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      class expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
	 punt and let the caller figure out how best to deal with this
	 situation.  */
      if (COMPARISON_P (comparison))
	{
	  saved_pending_stack_adjust save;
	  save_pending_stack_adjust (&save);
	  last = get_last_insn ();
	  do_pending_stack_adjust ();
	  machine_mode cmpmode = cmode;
	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
			    GET_CODE (comparison), NULL_RTX, unsignedp,
			    OPTAB_WIDEN, &comparison, &cmpmode);
	  if (comparison)
	    {
	      class expand_operand ops[4];

	      create_output_operand (&ops[0], target, mode);
	      create_fixed_operand (&ops[1], comparison);
	      create_input_operand (&ops[2], op2, mode);
	      create_input_operand (&ops[3], op3, mode);
	      if (maybe_expand_insn (icode, 4, ops))
		{
		  if (ops[0].value != target)
		    convert_move (target, ops[0].value, false);
		  return target;
		}
	    }
	  delete_insns_since (last);
	  restore_pending_stack_adjust (&save);
	}

      if (pass == 1)
	return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
	 operand order, perhaps it will expand successfully.  */
      if (swapped)
	code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
							   NULL))
	       != UNKNOWN)
	code = reversed;
      else
	return NULL_RTX;
      std::swap (op2, op3);
    }
}
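
/* As an illustration, expanding MAX (a, b) on a movcc target might be
   attempted as (a sketch, not compiled code):

       rtx res = emit_conditional_move (NULL_RTX, GT, a, b, SImode,
					a, b, SImode, 0);
       if (res == NULL_RTX)
	 ... fall back to a compare-and-branch sequence ...

   The second pass of the loop above retries with the condition reversed
   and OP2/OP3 swapped, which gives the target one more chance when its
   movcc pattern only accepts certain comparison/operand shapes.  */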
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
				    machine_mode mode, rtx cond, rtx op1,
				    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
bool
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}
/* Generate and return an insn body to add Y and Z into X, using the
   target's addptr3 pattern.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}
/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

bool
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return false;

  return true;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract C from R1,
   storing the result in R0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
bool
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && (significand_size (fmode)
		< GET_MODE_UNIT_PRECISION (GET_MODE (from))))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
	{
	  scalar_mode fmode = fmode_iter.require ();
	  if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
	      && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
	    break;
	}

      if (!fmode_iter.exists (&fmode))
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = to_mode;

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (from_mode))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = from_mode;
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
			       0, label);


      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
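
/* As an illustration, the signed-convert-then-correct path above
   computes, for a 32-bit unsigned FROM (a sketch in C):

       double t = (double) (int32_t) from;
       if ((int32_t) from < 0)
	 t += 4294967296.0;			-- correct by 2**32
       to = t;

   and the narrow-target path handles a set sign bit via

       t = (double) (int32_t) ((from >> 1) | (from & 1));
       t = t + t;				-- undo the shift

   where the ORed-in low bit acts as a sticky rounding bit, so the one
   rounding step happens at the final magnitude.  */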
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for any other input, overflow happens and the result is
     undefined).  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
	scalar_mode fmode = fmode_iter.require ();
	if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
					   0, &must_trunc)
	    && (!DECIMAL_FLOAT_MODE_P (fmode)
		|| (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
	  {
	    int bitsize;
	    REAL_VALUE_TYPE offset;
	    rtx limit;
	    rtx_code_label *lab1, *lab2;
	    rtx_insn *insn;

	    bitsize = GET_MODE_PRECISION (to_mode);
	    real_2expN (&offset, bitsize - 1, fmode);
	    limit = const_double_from_real_value (offset, fmode);
	    lab1 = gen_label_rtx ();
	    lab2 = gen_label_rtx ();

	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    /* See if we need to do the subtraction.  */
	    do_pending_stack_adjust ();
	    emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
				     GET_MODE (from), 0, lab1);

	    /* If not, do the signed "fix" and branch around fixup code.  */
	    expand_fix (to, from, 0);
	    emit_jump_insn (targetm.gen_jump (lab2));
	    emit_barrier ();

	    /* Otherwise, subtract 2**(N-1), convert to signed number,
	       then add 2**(N-1).  Do the addition using XOR since this
	       will often generate better code.  */
	    emit_label (lab1);
	    target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				   NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    expand_fix (to, target, 0);
	    target = expand_binop (to_mode, xor_optab, to,
				   gen_int_mode
				   (HOST_WIDE_INT_1 << (bitsize - 1),
				    to_mode),
				   to, 1, OPTAB_LIB_WIDEN);

	    if (target != to)
	      emit_move_insn (to, target);

	    emit_label (lab2);

	    if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
	      {
		/* Make a place for a REG_NOTE and add it.  */
		insn = emit_move_insn (to, to);
		set_dst_reg_note (insn, REG_EQUAL,
				  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
						 copy_rtx (from)),
				  to);
	      }

	    return;
	  }
      }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
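
/* As an illustration, the unsigned fixup above is the classic pattern;
   for DFmode to 64-bit unsigned it behaves like (a sketch in C):

       if (from < 9223372036854775808.0)	-- 2**63
	 to = (int64_t) from;			-- plain signed fix
       else
	 to = (int64_t) (from - 9223372036854775808.0)
	      ^ ((uint64_t) 1 << 63);

   Subtracting 2**63 merely clears that bit of the mantissa, so it is
   exact, and the XOR re-adds 2**63 more cheaply than an add.  */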
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
	 it here instead of inside emit_library_call_value because in
	 emit_library_call_value we don't know if we should do a signed or
	 unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
					&unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
	return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

bool
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    case BIT_AND_EXPR:
      code = AND;
      break;

    case BIT_IOR_EXPR:
      code = IOR;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
		    tree t_op0, tree t_op1, bool unsignedp,
		    enum insn_code icode, unsigned int opno)
{
  class expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand (for left shift
   first operand) is a constant vector of zeros.  Return the shift distance
   in bits if so, or NULL_RTX if the vec_perm is not a shift.  MODE is the
   mode of the value being shifted.  SHIFT_OPTAB is vec_shr_optab for right
   shift or vec_shl_optab for left shift.  */
static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
			     optab shift_optab)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (shift_optab == vec_shl_optab)
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      unsigned firstidx = 0;
      for (unsigned int i = 0; i < nelt; i++)
	{
	  if (known_eq (sel[i], nelt))
	    {
	      if (i == 0 || firstidx)
		return NULL_RTX;
	      firstidx = i;
	    }
	  else if (firstidx
		   ? maybe_ne (sel[i], nelt + i - firstidx)
		   : maybe_ge (sel[i], nelt))
	    return NULL_RTX;
	}

      if (firstidx == 0)
	return NULL_RTX;
      first = firstidx;
    }
  else if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
	{
	  poly_int64 expected = i + first;
	  /* Indices into the second vector are all equivalent.  */
	  if (maybe_lt (sel[i], nelt)
	      ? maybe_ne (sel[i], expected)
	      : maybe_lt (expected, nelt))
	    return NULL_RTX;
	}
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
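
/* As an illustration: with four 32-bit elements, the selector
   { 1, 2, 3, 4 } applied to (v0, zero-vector) selects v0[1..3] followed
   by one zero element -- a whole-vector shift right by 1*32 bits -- so
   this function returns a 32-bit shift amount.  A zero first operand
   with selector { 0, 4, 5, 6 } is the mirrored vec_shl case, shifting
   v1 up by one element.  */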
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  class expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
	      || related_int_vector_mode (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
		       const vec_perm_builder &sel, machine_mode sel_mode,
		       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr or vec_shl.  We only do this
     if the second (for vec_shr) or first (for vec_shl) vector is all
     zeroes.  */
  insn_code shift_code = CODE_FOR_nothing;
  insn_code shift_code_qi = CODE_FOR_nothing;
  optab shift_optab = unknown_optab;
  rtx v2 = v0;
  if (v1 == CONST0_RTX (GET_MODE (v1)))
    shift_optab = vec_shr_optab;
  else if (v0 == CONST0_RTX (GET_MODE (v0)))
    {
      shift_optab = vec_shl_optab;
      v2 = v1;
    }
  if (shift_optab != unknown_optab)
    {
      shift_code = optab_handler (shift_optab, mode);
      shift_code_qi = ((qimode != VOIDmode && qimode != mode)
		       ? optab_handler (shift_optab, qimode)
		       : CODE_FOR_nothing);
    }
  if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
      if (shift_amt)
	{
	  class expand_operand ops[3];
	  if (shift_code != CODE_FOR_nothing)
	    {
	      create_output_operand (&ops[0], target, mode);
	      create_input_operand (&ops[1], v2, mode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code, 3, ops))
		return ops[0].value;
	    }
	  if (shift_code_qi != CODE_FOR_nothing)
	    {
	      rtx tmp = gen_reg_rtx (qimode);
	      create_output_operand (&ops[0], tmp, qimode);
	      create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code_qi, 3, ops))
		return gen_lowpart (mode, ops[0].value);
	    }
	}
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      v0 = force_reg (mode, v0);
      if (single_arg_p)
	v1 = v0;
      else
	v1 = force_reg (mode, v1);

      if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
	return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
	  && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
					       v1_qi, qimode_indices))
	return gen_lowpart (mode, target_qi);
    }

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!related_int_vector_mode (mode).exists (&required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
	return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
	{
	  rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
	  rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
	  if (tmp)
	    return gen_lowpart (mode, tmp);
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
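
/* As an illustration of the byte fallback: a V4SI selector { 0, 4, 1, 5 }
   expands in the V16QI domain (little-endian) to four byte indices per
   element, 4*i .. 4*i+3:

       { 0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23 }

   which is what new_expanded_vector computes for QIMODE_INDICES above.  */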
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
			       NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
			       gen_int_shift_amount (selmode, exact_log2 (u)),
			       NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
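
/* As an illustration of the variable-selector lowering: for 4-byte
   elements, SEL is first scaled by 4 (the ASHIFT above), the low byte
   of each element is then broadcast to all four of its bytes, and
   { 0, 1, 2, 3 } is added per element; an element index of 2 therefore
   becomes the byte indices { 8, 9, 10, 11 } fed to the QImode permute.  */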
/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
			   rtx target)
{
  class expand_operand ops[4];
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
  enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
  rtx mask, rtx_op1, rtx_op2;

  if (icode == CODE_FOR_nothing)
    return 0;

  mask = expand_normal (op0);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  mask = force_reg (mask_mode, mask);
  rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_input_operand (&ops[3], mask, mask_mode);
  expand_insn (icode, 4, ops);

  return ops[0].value;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  class expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
      if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
	  != CODE_FOR_nothing)
	return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
					  op2, target);
      /* Fake op0 < 0.  */
      else
	{
	  gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
		      == MODE_VECTOR_INT);
	  op0a = op0;
	  op0b = build_zero_cst (TREE_TYPE (op0));
	  tcode = LT_EXPR;
	}
    }
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));

  gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
	      && known_eq (GET_MODE_NUNITS (mode),
			   GET_MODE_NUNITS (cmp_op_mode)));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == LT_EXPR
	  && op0a == op0
	  && TREE_CODE (op0) == VECTOR_CST)
	{
	  /* A VEC_COND_EXPR condition could be folded from EQ_EXPR/NE_EXPR
	     into a constant when only get_vcond_eq_icode is supported.
	     Verify < 0 and != 0 behave the same and change it to NE_EXPR.  */
	  unsigned HOST_WIDE_INT nelts;
	  if (!VECTOR_CST_NELTS (op0).is_constant (&nelts))
	    {
	      if (VECTOR_CST_STEPPED_P (op0))
		return 0;
	      nelts = vector_cst_encoded_nelts (op0);
	    }
	  for (unsigned int i = 0; i < nelts; ++i)
	    if (tree_int_cst_sgn (vector_cst_elt (op0, i)) == 1)
	      return 0;
	  tcode = NE_EXPR;
	}
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vcond_eq_icode (mode, cmp_op_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
				   icode, 4);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  class expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  class expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
				   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  class expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
				 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
	sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
			+ ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
	sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
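
/* As an illustration of the even/odd method on V4SI: TAB1 yields the
   V2DI full products of elements { 0, 2 }, TAB2 those of elements
   { 1, 3 }.  Viewed as V4SI, the high halves of those products occupy
   the odd lanes on little-endian, and the interleaved selector built
   above gathers exactly those lanes back into element order 0..3.  */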
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
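
/* As an illustration (a sketch, not compiled code), an atomic
   fetch-and-add can be built on this helper roughly as:

       rtx old_reg = gen_reg_rtx (mode);
       rtx new_reg = gen_reg_rtx (mode);
       start_sequence ();
       emit_insn (gen_add3_insn (new_reg, old_reg, val));
       rtx seq = get_insns ();
       end_sequence ();
       if (expand_compare_and_swap_loop (mem, old_reg, new_reg, seq))
	 ... OLD_REG now holds the value fetched before the add ...

   Passing a null SEQ degenerates into a plain exchange loop.  */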
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  class expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;

  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation, which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (get_last_insn (), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}
/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
	expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (!is_mm_relaxed (model))
	    expand_memory_blockage ();
	  return ops[0].value;
	}
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store such
     an emulation performs would be incorrect if this is a volatile
     atomic load or targets read-only-mapped memory.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  class expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
	expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (is_mm_seq_cst (model))
	    expand_memory_blockage ();
	  return const0_rtx;
	}
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
	 we can try an atomic_exchange and throw away the result.  Otherwise,
	 don't do anything so that we do not create an inconsistency between
	 loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
	{
	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
	  if (!target)
	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
								val);
	  if (target)
	    return const0_rtx;
	}
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
	return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
	return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model,
				!after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after == fetch_before OP val.
	     Fetch_before == after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
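/* A worked example of the compensation code above (illustrative): if only
   atomic_fetch_add exists but the caller wants the add_fetch ("after")
   value, the expansion is

       before = atomic_fetch_add (mem, val);
       after  = before + val;

   For NOT (nand), where after == ~(before & val), the two-step AND/NOT
   form above is used instead of a single binop.  */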
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  rtx_insn *insns = get_insns ();
	  end_sequence ();
	  emit_insn (insns);
	  return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    addr, ptr_mode, val, mode);

	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || ((insn_data[(int) icode].operand[opno].predicate)
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i, size;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (class expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
			 gen_int_mode (intval, MAX_MODE_INT),
			 VOIDmode, false, intval);
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    class expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
	  && !side_effects_p (addr))
	{
	  rtx_insn *last;
	  machine_mode mode;

	  last = get_last_insn ();
	  mode = get_address_mode (mem);
	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
	  if (insn_operand_matches (icode, opno, mem))
	    {
	      op->value = mem;
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  class expand_operand *op)
{
  machine_mode mode, imode, tmode;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      {
	temporary_volatile_ok v (true);
	return maybe_legitimize_operand_same_code (icode, opno, op);
      }

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      op->target = 1;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
	       ? GET_MODE_INNER (imode) : imode);
      if (tmode != VOIDmode && tmode != mode)
	{
	  op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
	  mode = tmode;
	}
      if (imode != VOIDmode && imode != mode)
	{
	  gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
	  op->value = expand_vector_broadcast (imode, op->value);
	  if (op->value == NULL)
	    return false;
	}
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
					  op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
	  && known_eq (trunc_int_for_mode (op->int_value, mode),
		       op->int_value))
	{
	  op->value = gen_int_mode (op->int_value, mode);
	  goto input;
	}
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (class expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}
/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static inline bool
can_reuse_operands_p (enum insn_code icode,
		      unsigned int opno1, unsigned int opno2,
		      const class expand_operand *op1,
		      const class expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
	  != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
      /* Outputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
			   unsigned int nops, class expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
	 This avoids duplicate rtl and ensures that tied operands
	 remain tied.

	 This search is linear, but NOPS is bounded at compile time
	 to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
	if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
	    && rtx_equal_p (orig_values[j], orig_values[i])
	    && ops[j].value
	    && insn_operand_matches (icode, opno + i, ops[j].value))
	  {
	    ops[i].value = copy_rtx (ops[j].value);
	    break;
	  }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
	{
	  delete_insns_since (last);
	  return false;
	}
    }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		class expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
		   class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
			class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
	     class expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}
/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
		  class expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}