/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
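
/* Example (illustrative): if expanding TARGET = OP0 + OP1 produced a
   multi-insn sequence whose last insn sets TARGET, this routine attaches
   a note of the form (REG_EQUAL (plus:SI OP0 OP1)) to that insn, so that
   CSE can later treat TARGET as equal to the whole expression.  */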
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUIV note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 0;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
          {
            note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
            if (GET_MODE_UNIT_SIZE (op0_mode)
                > GET_MODE_UNIT_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, op0_mode);
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, op0_mode);
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
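
/* Example (illustrative): widening a QImode AND operand to SImode may
   pass NO_EXTEND != 0, since the upper bits of the SImode result are
   ignored once it is truncated back to QImode; a right shift, by
   contrast, moves high bits into low bits and needs a real extension.  */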
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */
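
/* Example (illustrative): a VEC_WIDEN_MULT_HI_EXPR multiplying two V8HI
   vectors into a V4SI result arrives here with OP0 and OP1 holding the
   V8HI operands and WIDE_OP unused, matching the widening-mult row of
   the table above.  */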
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
            || ops->code == VEC_UNPACK_LO_EXPR)
           && VECTOR_BOOLEAN_TYPE_P (ops->type)
           && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
           && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
           && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
         the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
         vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
         the pattern number of elements in the wider vector.  */
      widen_pattern_optab
        = (ops->code == VEC_UNPACK_HI_EXPR
           ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
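
/* Example (illustrative): a fused multiply-add TARGET = A * B + C in
   SFmode can be expanded as

     rtx r = expand_ternary_op (SFmode, fma_optab, a, b, c, target, 0);

   which emits the target's fma pattern when one is available; R may or
   may not be TARGET.  */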
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
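
/* Example (illustrative): asked to add (const_int 2) and (const_int 3)
   in SImode, this returns (const_int 5) directly instead of emitting an
   add instruction.  */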
rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
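
/* Example (illustrative): broadcasting (const_int 7) into V4SImode
   yields the constant vector {7, 7, 7, 7}; a variable SImode element
   instead goes through the target's vec_duplicate or vec_init pattern
   below.  */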
rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
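
/* Example (illustrative, assuming BITS_PER_WORD == 32): a 64-bit
   logical right shift by a count known to be >= 32 reduces to

     INTO_TARGET  = OUTOF_INPUT >> (count - 32)
     OUTOF_TARGET = 0   (copies of the sign bit for an arithmetic shift)

   where SUPERWORD_OP1 is the already-adjusted count - 32.  */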
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab, outof_input,
                                 gen_int_shift_amount (word_mode,
                                                       BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
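
/* Example (illustrative, assuming BITS_PER_WORD == 32): a 64-bit left
   shift by OP1 < 32 is computed as

     INTO_TARGET  = (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1))
     OUTOF_TARGET = OUTOF_INPUT << OP1

   where the second term of INTO_TARGET supplies the bits carried across
   the word boundary.  */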
static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
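      /* Example (illustrative, 32-bit words, counts truncated to 5 bits):
         shifting by 1 and then by ~OP1 gives a total count of
         1 + (~OP1 & 31) == 1 + (31 - OP1) == 32 - OP1 (mod 32),
         which is exactly the reverse-shift amount needed, without ever
         using the literal count 32.  */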
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
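
/* Example (illustrative, assuming BITS_PER_WORD == 32): for a DImode
   shift by a variable count C this routine materializes both the
   C < 32 case (expand_subword_shift) and the C >= 32 case
   (expand_superword_shift) and selects between them, preferring
   conditional moves over branches when the target has them.  */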
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label,
                           profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                         _______________________
                        [__op0_high_|__op0_low__]
                         _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________
                         _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
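
/* Worked example (illustrative, pretending BITS_PER_WORD == 8):
   multiplying the 16-bit values 0x1234 and 0x0056 gives the partials

     (1)  0x34 * 0x56 = 0x1178            (unsigned widening multiply)
     (2a) 0x34 * 0x00 -> low byte 0x00    (non-widening)
     (2b) 0x12 * 0x56 = 0x060c -> low byte 0x0c   (non-widening)

   and the low 16 result bits are 0x1178 + ((0x00 + 0x0c) << 8) = 0x1d78,
   which equals (0x1234 * 0x56) & 0xffff.  */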
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
                : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */
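
/* Example (illustrative): an AND with 0x12345678 may require a
   multi-insn immediate sequence on some RISC targets; when rtx_cost
   says the constant is more expensive than a register read, it is
   loaded into a pseudo once and reused.  */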
static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different than the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value, mode0))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
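
/* Example (illustrative): a typical caller expands A + B in SImode as

     rtx r = expand_binop (SImode, add_optab, a, b, target, 0,
                           OPTAB_LIB_WIDEN);

   with UNSIGNEDP == 0; OPTAB_LIB_WIDEN permits every fallback below,
   from direct insns through wider modes to a libcall.  */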
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
        {
          machine_mode from_mode = widened_mode (mode, op0, op1);
          icode = find_widening_optab_handler (binoptab, mode, from_mode);
        }
      else
        icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
        {
          temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
                                        target, unsignedp, methods, last);
          if (temp)
            return temp;
        }
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
         one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
        op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab != unknown_optab
          && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
                  > GET_MODE_BITSIZE (inner_mode)))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        machine_mode next_mode;
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 next_mode, mode)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
        op0_mode = int_mode;
      if (op1_mode == VOIDmode)
        op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, op0_mode),
                                operand_subword_force (op1, i, op1_mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
                  ? as_a <scalar_int_mode> (GET_MODE (op1))
                  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (int_mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, int_mode);
          into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

          outof_input = operand_subword_force (op0, outof_word, int_mode);
          into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target. Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          HOST_WIDE_INT first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = shift_count - BITS_PER_WORD;
              second_shift_count = 2 * BITS_PER_WORD - shift_count;
            }
          else
            {
              first_shift_count = BITS_PER_WORD - shift_count;
              second_shift_count = shift_count;
            }
          rtx first_shift_count_rtx
            = gen_int_shift_amount (word_mode, first_shift_count);
          rtx second_shift_count_rtx
            = gen_int_shift_amount (word_mode, second_shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
          rtx op0_piece = operand_subword_force (xop0, index, int_mode);
          rtx op1_piece = operand_subword_force (xop1, index, int_mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                int_mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (int_mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
              != CODE_FOR_nothing))
        {
          product = expand_doubleword_mult (int_mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
            {
              rtx_insn *move = emit_move_insn (target ? target : product,
                                               product);
              set_dst_reg_note (move,
                                REG_EQUAL,
                                gen_rtx_fmt_ee (MULT, int_mode,
                                                copy_rtx (op0),
                                                copy_rtx (op1)),
                                target ? target : product);
            }
          return product;
        }
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
        {
          op1_mode = targetm.libgcc_shift_count_mode ();
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (op1_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
                                       NULL_RTX, LCT_CONST, mode,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
                            trapv ? NULL_RTX
                            : gen_rtx_fmt_ee (optab_to_code (binoptab),
                                              mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
1852 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1853 || methods
== OPTAB_MUST_WIDEN
))
1855 /* Caller says, don't even try. */
1856 delete_insns_since (entry_last
);
1860 /* Compute the value of METHODS to pass to recursive calls.
1861 Don't allow widening to be tried recursively. */
1863 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      /* This code doesn't make sense for conversion optabs, since we
	 wouldn't then want to extend the operands to be the same size
	 as each other.  */
      gcc_assert (!convert_optab_p (binoptab));
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
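
/* Illustration (not part of GCC): the widening fallback above is valid for
   ior, and, xor, add, sub, mul and shl because the low bits of those
   results depend only on the low bits of the inputs.  A plain-C model of
   doing a 16-bit add in 32 bits and truncating, assuming <stdint.h>:

     static uint16_t
     add16_via_32 (uint16_t a, uint16_t b)
     {
       uint32_t wa = a, wb = b;	    // widen_operand; for these opcodes the
				    // kind of extension is irrelevant
       return (uint16_t) (wa + wb); // gen_lowpart-style truncation
     }
*/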

/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
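
/* Hypothetical call, for illustration only: a caller expanding a division
   would select the optab pair by signedness and let this helper walk the
   direct -> widen -> libcall ladder on its behalf:

     temp = sign_expand_binop (mode, udiv_optab, sdiv_optab,
			       op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
*/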

/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
      create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
	return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}

/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}

/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */

static rtx
widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
{
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0)
	    target = gen_reg_rtx (mode);
	  xop0 = widen_operand (op0, wider_mode, mode,
				unoptab != clrsb_optab, false);
	  temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
			      unoptab != clrsb_optab);
	  if (temp != 0)
	    temp = expand_binop
	      (wider_mode, sub_optab, temp,
	       gen_int_mode (GET_MODE_PRECISION (wider_mode)
			     - GET_MODE_PRECISION (mode),
			     wider_mode),
	       target, true, OPTAB_DIRECT);
	  if (temp == 0)
	    delete_insns_since (last);

	  return temp;
	}
    }
  return 0;
}
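
/* Illustration (not part of GCC): the subtraction above compensates for
   the extra leading zeros introduced by the zero-extension.  A plain-C
   model for clz of a 16-bit value via a 32-bit clz, using GCC's
   __builtin_clz for the wide operation (x must be nonzero):

     static int
     clz16_via_clz32 (uint16_t x)
     {
       return __builtin_clz ((uint32_t) x) - (32 - 16);
     }
*/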

/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */

static rtx
expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
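
/* Illustration (not part of GCC): a plain-C model of the branchy sequence
   emitted above, assuming 32-bit words (behavior at hi == lo == 0 is
   undefined, just as for __builtin_clzll):

     static int
     clz64_model (uint32_t hi, uint32_t lo)
     {
       if (hi != 0)
	 return __builtin_clz (hi);	  // clz of the high word
       return 32 + __builtin_clz (lo);	  // plus bits in the high word
     }
*/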

/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */

static rtx
expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 0, mode), NULL_RTX,
			   true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 1, mode), NULL_RTX,
			   true);
  seq = get_insns ();
  end_sequence ();

  if (!t0 || !t1)
    return NULL_RTX;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  emit_insn (seq);

  start_sequence ();
  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
  seq = get_insns ();
  end_sequence ();

  if (t)
    {
      add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
      emit_insn (seq);
    }

  return t;
}

/* Try calculating
	(parity:wide x)
   as
	(parity:narrow (low (x) ^ high (x)))  */

static rtx
expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
			operand_subword_force (op0, 0, mode),
			operand_subword_force (op0, 1, mode),
			NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
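
/* Illustration (not part of GCC): parity is invariant under xor-folding,
   so for a 64-bit value built from two 32-bit words,

     parity (hi:lo) == __builtin_parity (hi ^ lo).  */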

/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */

static rtx
widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx x;
  rtx_insn *last;
  opt_scalar_int_mode wider_mode_iter;

  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    if (optab_handler (bswap_optab, wider_mode_iter.require ())
	!= CODE_FOR_nothing)
      break;

  if (!wider_mode_iter.exists ())
    return NULL_RTX;

  scalar_int_mode wider_mode = wider_mode_iter.require ();
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      GET_MODE_BITSIZE (wider_mode)
		      - GET_MODE_BITSIZE (mode),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
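
/* Illustration (not part of GCC): a plain-C model of the shift adjustment
   above, computing a 16-bit bswap with a 32-bit bswap:

     static uint16_t
     bswap16_via_bswap32 (uint16_t x)
     {
       return (uint16_t) (__builtin_bswap32 (x) >> (32 - 16));
     }
*/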

/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
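
/* Illustration (not part of GCC): byte-swapping a double-word value is
   byte-swapping each word and exchanging the two words.  A plain-C model
   with 32-bit words:

     static uint64_t
     bswap64_model (uint64_t x)
     {
       uint32_t lo = (uint32_t) x, hi = (uint32_t) (x >> 32);
       return ((uint64_t) __builtin_bswap32 (lo) << 32)
	      | __builtin_bswap32 (hi);
     }
*/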

/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_MODE_FROM (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0 || GET_MODE (target) != wider_mode)
	    target = gen_reg_rtx (wider_mode);

	  xop0 = widen_operand (op0, wider_mode, mode, true, false);
	  temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
			      true);
	  if (temp != 0)
	    temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				 target, true, OPTAB_DIRECT);

	  if (temp)
	    {
	      if (mclass != MODE_INT
		  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		return convert_to_mode (mode, temp, 0);
	      else
		return gen_lowpart (mode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }
  return 0;
}

/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
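
/* Illustration (not part of GCC): x & -x isolates the lowest set bit, so
   for nonzero x its clz determines the trailing-zero count.  A plain-C
   model for 32 bits (K = 31):

     static int
     ctz_via_clz (uint32_t x)
     {
       return 31 - __builtin_clz (x & -x);   // undefined at x == 0
     }
*/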

/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */

static rtx
expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
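
/* Illustration (not part of GCC): the correction above implements
   ffs (x) == ctz (x) + 1 with ffs (0) == 0.  A plain-C model, with the
   test-and-branch made explicit:

     static int
     ffs_model (uint32_t x)
     {
       int t = (x != 0) ? __builtin_ctz (x) : -1;   // -1 at zero
       return t + 1;				     // 0..bitsize
     }
*/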

/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
			   machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}

/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  scalar_int_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
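
/* Illustration (not part of GCC): for IEEE single precision the sign is
   bit 31, so negation is an xor and absolute value an and-not on the
   integer image.  A plain-C model using memcpy as the "subreg", assuming
   <stdint.h> and <string.h>:

     static float
     fneg_model (float x)
     {
       uint32_t u;
       memcpy (&u, &x, sizeof u);	// gen_lowpart (SImode, x)
       u ^= 0x80000000u;		// NEG: flip the sign bit
       memcpy (&x, &u, sizeof x);
       return x;
     }
*/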

/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */

static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX, mode))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}

/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  scalar_float_mode float_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_clz (int_mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;
	}
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (int_mode, op0, target);
      if (temp)
	return temp;
    }

  if (unoptab == parity_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  temp1 = expand_binop (mode, ashl_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_bswap (int_mode, op0, target);
	  if (temp)
	    return temp;

	  /* We do not provide a 128-bit bswap in libgcc so force the use of
	     a double bswap for 64-bit targets.  */
	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && (UNITS_PER_WORD == 8
		  || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
	    {
	      temp = expand_doubleword_bswap (mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */
	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, int_mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  /* Emit ~op0 as op0 ^ -1.  */
  if (unoptab == one_cmpl_optab
      && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
      && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
    {
      temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
			   target, unsignedp, OPTAB_DIRECT);
      if (temp)
	return temp;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (is_a <scalar_float_mode> (mode, &float_mode))
	{
	  temp = expand_absneg_bit (NEG, float_mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ffs (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ctz (int_mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
	eq_value = NULL_RTX;
      else
	{
	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
	  if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
	  else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (ZERO_EXTEND,
					   outmode, eq_value, mode);
	}
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  temp = expand_binop
		    (wider_mode, sub_optab, temp,
		     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
				   - GET_MODE_PRECISION (int_mode),
				   wider_mode),
		     target, true, OPTAB_DIRECT);
		}

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
			      == GET_MODE_BITSIZE (wider_int_mode)
			      && GET_MODE_PRECISION (int_mode)
				 == GET_MODE_BITSIZE (int_mode));

		  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
				       GET_MODE_BITSIZE (wider_int_mode)
				       - GET_MODE_BITSIZE (int_mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}

/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  scalar_float_mode float_mode;
  if (is_a <scalar_float_mode> (mode, &float_mode))
    {
      temp = expand_absneg_bit (ABS, float_mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (int_mode,
			     result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL, op1,
			   profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}

/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}

/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  if (!int_mode_for_mode (mode).exists (&imode))
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;

      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}

/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_wide_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
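
/* Illustration (not part of GCC): the single-word mask sequence above,
   in plain C for IEEE single precision, assuming <stdint.h> and
   <string.h>:

     static float
     copysign_model (float x, float y)
     {
       uint32_t ux, uy;
       memcpy (&ux, &x, sizeof ux);
       memcpy (&uy, &y, sizeof uy);
       ux = (ux & ~0x80000000u)		// clear the sign of x ...
	    | (uy & 0x80000000u);	// ... and copy in the sign of y
       memcpy (&x, &ux, sizeof x);
       return x;
     }
*/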

/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  scalar_float_mode mode;
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  mode = as_a <scalar_float_mode> (GET_MODE (op0));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  class expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
		    GET_MODE (op0));

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}

struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */

static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}

/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (insn, no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}

/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}

/* Return whether the backend can emit a vector comparison for code CODE,
   comparing operands of mode CMP_OP_MODE and producing a result with
   VALUE_MODE.  */

bool
can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
		     machine_mode cmp_op_mode)
{
  enum insn_code icode;
  bool unsigned_p = (code == LTU || code == LEU || code == GTU || code == GEU);
  rtx reg1 = alloca_raw_REG (cmp_op_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (cmp_op_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx test = alloca_rtx_fmt_ee (code, value_mode, reg1, reg2);

  return (icode = get_vcond_icode (value_mode, cmp_op_mode, unsigned_p))
	 != CODE_FOR_nothing
	 && insn_operand_matches (icode, 3, test);
}

/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      opt_scalar_int_mode cmp_mode_iter;
      FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
	{
	  scalar_int_mode cmp_mode = cmp_mode_iter.require ();
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if (CONST_INT_P (size)
	      ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
		 > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (x, y, size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
		  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
	{
	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
			    ptest, pmode);
	  if (*ptest)
	    return;
	}

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.

	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
		 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
	return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
	return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
			  profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					  XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, rtx label,
			 profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
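
/* Illustrative sketch (editorial addition, not part of the original
   file): a caller that wants "if (a == b) goto lab" for two SImode
   values might write roughly

     rtx_code_label *lab = gen_label_rtx ();
     emit_cmp_and_jump_insns (a, b, EQ, NULL_RTX, SImode, 0, lab,
			      profile_probability::even ());

   where A and B are hypothetical rtxes produced earlier, e.g. by
   gen_reg_rtx (SImode) or by expanding a tree expression.  */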
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  std::swap (x, y);
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target,
			     false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      class expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
	 punt and let the caller figure out how best to deal with this
	 situation.  */
      if (COMPARISON_P (comparison))
	{
	  saved_pending_stack_adjust save;
	  save_pending_stack_adjust (&save);
	  last = get_last_insn ();
	  do_pending_stack_adjust ();
	  machine_mode cmpmode = cmode;
	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
			    GET_CODE (comparison), NULL_RTX, unsignedp,
			    OPTAB_WIDEN, &comparison, &cmpmode);
	  if (comparison)
	    {
	      class expand_operand ops[4];

	      create_output_operand (&ops[0], target, mode);
	      create_fixed_operand (&ops[1], comparison);
	      create_input_operand (&ops[2], op2, mode);
	      create_input_operand (&ops[3], op3, mode);
	      if (maybe_expand_insn (icode, 4, ops))
		{
		  if (ops[0].value != target)
		    convert_move (target, ops[0].value, false);
		  return target;
		}
	    }
	  delete_insns_since (last);
	  restore_pending_stack_adjust (&save);
	}

      if (pass == 1)
	return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
	 operand order, perhaps it will expand successfully.  */
      if (swapped)
	code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
							   NULL))
	       != UNKNOWN)
	code = reversed;
      else
	return NULL_RTX;
      std::swap (op2, op3);
    }
}
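
/* Illustrative sketch (editorial addition, not part of the original
   file): "x = (a < b) ? c : d" in SImode, with the comparison also in
   SImode, could be attempted as

     rtx r = emit_conditional_move (NULL_RTX, LT, a, b, SImode,
				    c, d, SImode, 0);

   where A, B, C and D are hypothetical rtxes; a NULL_RTX result means
   the target has no usable mov<mode>cc pattern and the caller must fall
   back to a branch sequence.  */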
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
				    machine_mode mode, rtx cond, rtx op1,
				    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate and return an insn body to add Y and Z into X,
   using the addptr pattern.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}
/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract c from r1,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
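
/* Illustrative sketch (editorial addition, not part of the original
   file): widening a QImode pseudo Y into an SImode pseudo X with
   zero-extension could be written

     emit_insn (gen_extend_insn (x, y, SImode, QImode, 1));

   the caller is responsible for checking can_extend_p first, since
   gen_extend_insn assumes the extension pattern exists.  */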
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && (significand_size (fmode)
		< GET_MODE_UNIT_PRECISION (GET_MODE (from))))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
	{
	  scalar_mode fmode = fmode_iter.require ();
	  if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
	      && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
	    break;
	}

      scalar_mode fmode;
      if (!fmode_iter.exists (&fmode))
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = to_mode;

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (from_mode))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = from_mode;
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
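
/* Worked example (editorial note, not part of the original file):
   converting a 32-bit unsigned value U to DFmode with only a signed
   SImode->DFmode pattern first converts U as signed, yielding a result
   in [-2^31, 2^31); if U had its sign bit set, the code above then adds
   2^32 (real_2expN of the source precision), which restores the
   intended value exactly because DFmode has more than 32 significand
   bits.  */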
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for other inputs overflow happens and the result is
     undefined), so we know that the most important bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
	scalar_mode fmode = fmode_iter.require ();
	if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
					   0, &must_trunc)
	    && (!DECIMAL_FLOAT_MODE_P (fmode)
		|| (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
	  {
	    int bitsize;
	    REAL_VALUE_TYPE offset;
	    rtx limit;
	    rtx_code_label *lab1, *lab2;
	    rtx_insn *insn;

	    bitsize = GET_MODE_PRECISION (to_mode);
	    real_2expN (&offset, bitsize - 1, fmode);
	    limit = const_double_from_real_value (offset, fmode);
	    lab1 = gen_label_rtx ();
	    lab2 = gen_label_rtx ();

	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    /* See if we need to do the subtraction.  */
	    do_pending_stack_adjust ();
	    emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
				     GET_MODE (from), 0, lab1);

	    /* If not, do the signed "fix" and branch around fixup code.  */
	    expand_fix (to, from, 0);
	    emit_jump_insn (targetm.gen_jump (lab2));
	    emit_barrier ();

	    /* Otherwise, subtract 2**(N-1), convert to signed number,
	       then add 2**(N-1).  Do the addition using XOR since this
	       will often generate better code.  */
	    emit_label (lab1);
	    target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				   NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    expand_fix (to, target, 0);
	    target = expand_binop (to_mode, xor_optab, to,
				   gen_int_mode
				   (HOST_WIDE_INT_1 << (bitsize - 1),
				    to_mode),
				   to, 1, OPTAB_LIB_WIDEN);

	    if (target != to)
	      emit_move_insn (to, target);

	    emit_label (lab2);

	    if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
	      {
		/* Make a place for a REG_NOTE and add it.  */
		insn = emit_move_insn (to, to);
		set_dst_reg_note (insn, REG_EQUAL,
				  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
						 copy_rtx (from)),
				  to);
	      }

	    return;
	  }
      }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
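
/* Worked example (editorial note, not part of the original file): for
   DFmode -> unsigned DImode with only a signed fix available, the code
   above compares the input against 2^63; inputs in [2^63, 2^64) are
   reduced by 2^63, converted as signed, and the result's top bit is
   then set again with XOR, which is exact since subtracting 2^63 merely
   clears the leading mantissa bit.  */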
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
	 it here instead of inside emit_library_call_value because in
	 emit_library_call_value we don't know if we should do a signed or
	 unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
					&unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
	return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM to TO, where at least one of the two
   is a fixed-point value.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
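
/* Usage note (editorial addition, not part of the original file): this
   helper backs conversions that must go through a specific conversion
   optab rather than a plain FIX, e.g. expanding lrint-style built-ins
   via lrint_optab; the caller passes the optab and falls back to a
   libcall when this returns false.  */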
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
		    tree t_op0, tree t_op1, bool unsignedp,
		    enum insn_code icode, unsigned int opno)
{
  class expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand (for left shift
   first operand) is a constant vector of zeros.  Return the shift distance
   in bits if so, or NULL_RTX if the vec_perm is not a shift.  MODE is the
   mode of the value being shifted.  SHIFT_OPTAB is vec_shr_optab for right
   shift or vec_shl_optab for left shift.  */
static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
			     optab shift_optab)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (shift_optab == vec_shl_optab)
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      unsigned firstidx = 0;
      for (unsigned int i = 0; i < nelt; i++)
	{
	  if (known_eq (sel[i], nelt))
	    {
	      if (i == 0 || firstidx)
		return NULL_RTX;
	      firstidx = i;
	    }
	  else if (firstidx
		   ? maybe_ne (sel[i], nelt + i - firstidx)
		   : maybe_ge (sel[i], nelt))
	    return NULL_RTX;
	}

      if (firstidx == 0)
	return NULL_RTX;
      first = firstidx;
    }
  else if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
	{
	  poly_int64 expected = i + first;
	  /* Indices into the second vector are all equivalent.  */
	  if (maybe_lt (sel[i], nelt)
	      ? maybe_ne (sel[i], expected)
	      : maybe_lt (expected, nelt))
	    return NULL_RTX;
	}
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
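
/* Worked example (editorial note, not part of the original file): for
   V4SImode, the selector { 1, 2, 3, 4 } with a zero second operand
   picks the three high elements of the first vector followed by one
   zero, i.e. a vec_shr, and this function returns 1 * 32 = 32 bits; a
   selector that is not a single stepped series, such as { 1, 3, 4, 5 },
   returns NULL_RTX.  */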
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  class expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
	      || related_int_vector_mode (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
		       const vec_perm_builder &sel, machine_mode sel_mode,
		       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr or vec_shl.  We only do this
     if the second (for vec_shr) or first (for vec_shl) vector is all
     zeroes.  */
  insn_code shift_code = CODE_FOR_nothing;
  insn_code shift_code_qi = CODE_FOR_nothing;
  optab shift_optab = unknown_optab;
  rtx v2 = v0;
  if (v1 == CONST0_RTX (GET_MODE (v1)))
    shift_optab = vec_shr_optab;
  else if (v0 == CONST0_RTX (GET_MODE (v0)))
    {
      shift_optab = vec_shl_optab;
      v2 = v1;
    }
  if (shift_optab != unknown_optab)
    {
      shift_code = optab_handler (shift_optab, mode);
      shift_code_qi = ((qimode != VOIDmode && qimode != mode)
		       ? optab_handler (shift_optab, qimode)
		       : CODE_FOR_nothing);
    }
  if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
      if (shift_amt)
	{
	  class expand_operand ops[3];
	  if (shift_amt == const0_rtx)
	    return v2;
	  if (shift_code != CODE_FOR_nothing)
	    {
	      create_output_operand (&ops[0], target, mode);
	      create_input_operand (&ops[1], v2, mode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code, 3, ops))
		return ops[0].value;
	    }
	  if (shift_code_qi != CODE_FOR_nothing)
	    {
	      rtx tmp = gen_reg_rtx (qimode);
	      create_output_operand (&ops[0], tmp, qimode);
	      create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code_qi, 3, ops))
		return gen_lowpart (mode, ops[0].value);
	    }
	}
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      v0 = force_reg (mode, v0);
      if (single_arg_p)
	v1 = v0;
      else
	v1 = force_reg (mode, v1);

      if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
	return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
	  && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
					       v1_qi, qimode_indices))
	return gen_lowpart (mode, target_qi);
    }

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!related_int_vector_mode (mode).exists (&required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
	return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
	{
	  rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
	  rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
	  if (tmp)
	    return gen_lowpart (mode, tmp);
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
			       NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
			       gen_int_shift_amount (selmode, exact_log2 (u)),
			       NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte of each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
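
/* Worked example (editorial note, not part of the original file):
   lowering a V4HImode permute to bytes uses U = 2, so each selector
   element is first doubled (multiplied by its byte size), the low byte
   of each element is then broadcast to both of its bytes, and the
   offsets { 0, 1 } are added, turning an element index I into the byte
   pair { 2*I, 2*I + 1 } that the QImode vec_perm pattern expects.  */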
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  class expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  class expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
				   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  class expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
				 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
	sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
			+ ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
	sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
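
/* Worked example (editorial note, not part of the original file): with
   the even/odd method on a little-endian V4SImode target, M1 holds the
   DImode products of elements 0 and 2 and M2 those of elements 1 and 3;
   the selector built above picks the high halves { 1, 5, 3, 7 } from
   the concatenation of M1 and M2, reassembling the four high parts in
   element order.  */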
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
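
/* Illustrative sketch (editorial addition, not part of the original
   file): an atomic OR on a target with only compare-and-swap could be
   built on this helper roughly as

     rtx old_reg = gen_reg_rtx (mode), new_reg = gen_reg_rtx (mode);
     start_sequence ();
     rtx t = expand_simple_binop (mode, IOR, old_reg, val,
				  new_reg, 1, OPTAB_LIB_WIDEN);
     if (t != new_reg)
       emit_move_insn (new_reg, t);
     rtx seq = get_insns ();
     end_sequence ();
     expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);

   mirroring the way the generic atomic fetch-and-op expansion uses
   this loop.  */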
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  class expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation, which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
                                           MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}

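/* For example (illustrative only): __sync_lock_test_and_set (&flag, 1) on
   a target with no usable exchange pattern, no test-and-set pattern or
   libcall, and no inline compare-and-swap falls through the first three
   attempts above and, because the stored value is exactly const1_rtx, is
   finally expanded through maybe_emit_atomic_test_and_set.  */
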
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx,
                                             model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
        emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}

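/* Illustrative sketch: a port whose test-and-set instruction stores
   all-ones (as SPARC's ldstub does) would set
   targetm.atomic_test_and_set_trueval to 0xff, so the exchange above
   stores 0xff rather than 1, and the final emit_store_flag_force compares
   the old value against zero so that callers of __atomic_test_and_set
   still observe a 0/1 boolean.  */
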
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}

/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
                                rtx mem, rtx expected, rtx desired,
                                bool is_weak, enum memmodel succ_model,
                                enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
        ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
          || (target_bool = *ptarget_bool) == NULL
          || GET_MODE (target_bool) != bool_mode)
        target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
        {
          /* Return success/failure.  */
          target_bool = ops[0].value;
          target_oval = ops[1].value;
          goto success;
        }
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
        return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
         skip the computation of it.  */
      if (ptarget_bool == NULL)
        goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
        note_stores (get_last_insn (), find_cc_set, &cc_reg);
      if (cc_reg)
        {
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
                                               const0_rtx, VOIDmode, 0, 1);
          goto success;
        }
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}

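/* For reference, the 8-operand layout built above corresponds to the
   atomic_compare_and_swap<mode> named pattern:

     operand 0: bool success output
     operand 1: old-value output
     operand 2: memory operand
     operand 3: expected input
     operand 4: desired input
     operand 5: is_weak (const_int)
     operand 6: success memory model (const_int)
     operand 7: failure memory model (const_int)

   so a __atomic_compare_exchange_n call maps its arguments one-to-one
   onto operands 2..7.  */
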
/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}

/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}

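/* The RTL emitted by expand_asm_memory_blockage is roughly

     (parallel [(asm_operands/v "" "" 0 [] [] [])
                (clobber (mem:BLK (scratch)))])

   i.e. a volatile empty asm that clobbers all of memory, which is what
   the source-level asm volatile ("" : : : "memory") would produce.  */
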
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}

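/* Illustration of the fallback ladder above: __atomic_thread_fence
   (__ATOMIC_ACQUIRE) arrives here as MEMMODEL_ACQUIRE.  A target with a
   mem_thread_fence pattern gets that insn plus a compiler barrier; one
   with only a legacy memory_barrier pattern gets that insn; otherwise the
   __sync_synchronize libcall is used, and as a last resort only the
   compiler-level blockage is emitted.  */
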
/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}

/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
        expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        {
          if (!is_mm_relaxed (model))
            expand_memory_blockage ();
          return ops[0].value;
        }
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store such
     an emulation performs would be incorrect if this is a volatile atomic
     load or if it targets read-only-mapped memory.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}

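/* Worked example (illustrative): a seq-cst load of a word-sized X on a
   target without an atomic_load pattern takes the fallback path and emits

     fence; reg = X; fence;

   i.e. expand_mem_thread_fence both before and after the plain move,
   whereas an acquire load gets only the trailing fence.  */
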
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  class expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
        expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        {
          if (is_mm_seq_cst (model))
            expand_memory_blockage ();
          return const0_rtx;
        }
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
         we can try an atomic_exchange and throw away the result.  Otherwise,
         don't do anything so that we do not create an inconsistency between
         loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
        {
          rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
          if (!target)
            target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
                                                                val);
          if (target)
            return const0_rtx;
        }
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}

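/* Usage note (illustrative): __sync_lock_release (&lock) reaches this
   function with VAL == const0_rtx, MODEL == MEMMODEL_SYNC_RELEASE and
   USE_RELEASE == true, so a target providing only sync_lock_release<mode>
   still gets its native release-store pattern.  A generic
   __atomic_store_n (&x, 0, __ATOMIC_RELEASE) must not use that pattern,
   hence USE_RELEASE == false on that path.  */
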
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};

/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}

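/* A worked example of the table above: for CODE == PLUS the entry records
   the atomic_fetch_add/atomic_add_fetch optabs for the __atomic forms, the
   sync_old_add/sync_new_add optabs for the legacy __sync forms, and
   reverse_code == MINUS, meaning one fetch variant can be synthesized from
   the other by applying the reverse operation to the result (XOR is its
   own reverse; AND, IOR and NAND have none, hence UNKNOWN).  */
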
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}

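/* For instance, __atomic_fetch_and (&x, 0, __ATOMIC_SEQ_CST) always stores
   zero regardless of the old contents, so it is recognized above and
   emitted as an atomic exchange of zero; likewise __atomic_fetch_or with
   -1 unconditionally stores all-ones.  Neither rewrite applies to an
   OP_fetch (AFTER) form whose result is actually used, since the exchange
   returns the old value, not the new one.  */
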
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == fetch_after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}

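/* Compensation example: suppose a target provides only atomic_fetch_addsi
   and the user asks for __atomic_add_fetch (&x, n, m) (AFTER == true).
   The code above emits the fetch_add, obtaining the old value, and a plain
   add of N then reconstructs the new value: add_fetch == fetch_add + n.
   In the opposite direction, fetch_sub can be recovered from sub_fetch by
   adding N back, which is what reverse_code == PLUS encodes.  */
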
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* The reverse operation worked, so emit the insns and return.  */
          rtx_insn *insn = get_insns ();
          end_sequence ();
          emit_insn (insn);
          return result;
        }

      /* It did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}

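/* End-to-end sketch of the fallback order for __atomic_fetch_add: direct
   patterns (including add_fetch plus compensation), then atomic_fetch_sub
   of the negated value, then a __sync_fetch_and_add libcall when inline
   compare-and-swap is unavailable, and finally a compare-and-swap loop
   that recomputes T0 + VAL on each retry.  */
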
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}

/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  HOST_WIDE_INT size, i;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}

/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (class expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
                         gen_int_mode (intval, MAX_MODE_INT),
                         VOIDmode, false, intval);
}

/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    class expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          rtx_insn *last;
          machine_mode mode;

          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}

/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          class expand_operand *op)
{
  machine_mode mode, imode, tmode;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      {
        temporary_volatile_ok v (true);
        return maybe_legitimize_operand_same_code (icode, opno, op);
      }

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
               ? GET_MODE_INNER (imode) : imode);
      if (tmode != VOIDmode && tmode != mode)
        {
          op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
          mode = tmode;
        }
      if (imode != VOIDmode && imode != mode)
        {
          gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
          op->value = expand_vector_broadcast (imode, op->value);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
                                          op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
          && known_eq (trunc_int_for_mode (op->int_value, mode),
                       op->int_value))
        {
          op->value = gen_int_mode (op->int_value, mode);
          goto input;
        }
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}

/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (class expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}

/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static bool
can_reuse_operands_p (enum insn_code icode,
                      unsigned int opno1, unsigned int opno2,
                      const class expand_operand *op1,
                      const class expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
          != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
      /* Outputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, class expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
         This avoids duplicate rtl and ensures that tied operands
         remain tied.

         This search is linear, but NOPS is bounded at compile time
         to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
        if (can_reuse_operands_p (icode, opno + j, opno + i,
                                  &ops[j], &ops[i])
            && rtx_equal_p (orig_values[j], orig_values[i])
            && ops[j].value
            && insn_operand_matches (icode, opno + i, ops[j].value))
          {
            ops[i].value = copy_rtx (ops[j].value);
            break;
          }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
        {
          delete_insns_since (last);
          return false;
        }
    }
  return true;
}

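/* Reuse example (illustrative): expanding an insn whose operands 1 and 2
   are both the same pseudo in the same mode lets the loop above legitimize
   operand 1 once and share a copy of the result with operand 2, so that
   tied-operand constraints keep matching identical rtl rather than two
   fresh, distinct copies.  */
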
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                class expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}

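/* Typical use of this API (an illustrative sketch; most callers in this
   file follow the same shape):

     class expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_input_operand (&ops[1], op0, mode);
     create_input_operand (&ops[2], op1, mode);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;

   The create_*_operand calls describe what each operand should contain,
   maybe_gen_insn legitimizes them against ICODE's predicates, and the
   caller reads the (possibly replaced) output back from ops[0].value.  */
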
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             class expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  class expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}