/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */
rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */
enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */
enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
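/* For instance, optab_for_tree_code (PLUS_EXPR, type) yields addv_optab
   for a signed integral type whose overflow traps and add_optab
   otherwise, while optab_for_tree_code (RSHIFT_EXPR, type) picks
   lshr_optab or ashr_optab according to TYPE_UNSIGNED (type).  */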
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      return rotl_optab;

    case RROTATE_EXPR:
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_PACK_MOD_EXPR:
      return vec_pack_mod_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */
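/* As a concrete illustration (the particular modes here are only an
   example, not a requirement of this function): expanding a
   widening-dot-product whose inputs are V8HImode vectors and whose
   accumulator is V4SImode passes the two V8HImode operands in OP0 and
   OP1, the V4SImode accumulator in WIDE_OP, and produces a V4SImode
   result in TARGET, or in a fresh pseudo if TARGET does not satisfy the
   insn's predicate.  */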
rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_CODE_LENGTH (TREE_CODE (exp));

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
          && xmode1 != VOIDmode)
        xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
        {
          if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
        {
          if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);

  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
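/* For instance (illustration only, assuming 32-bit words): a logical
   right shift of a double word by 35 reaches this routine with
   SUPERWORD_OP1 equivalent to 3; INTO_TARGET receives OUTOF_INPUT >> 3
   and OUTOF_TARGET is filled with zeros, whereas an arithmetic right
   shift would fill it with copies of the sign bit.  */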
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
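/* Worked example (illustration only, assuming 32-bit words and
   SHIFT_MASK == 31): for a variable left shift of a double word, the
   bit OP1 & 32 selects between the two paths.  If it is clear, the
   subword path computes low_result = low << OP1 and
   high_result = (high << OP1) | (low >> (32 - OP1)).  If it is set,
   the superword path computes high_result = low << OP1, which equals
   low << (OP1 - 32) because word-mode counts are truncated to 5 bits,
   and low_result = 0.  */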
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                        _______________________
                        [__op0_high_|__op0_low__]
                        _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________

                        _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)             [__op0_low__*__op1_high_]
                     _______________________
    (2b)             [__op0_high_*__op1_low__]
          _______________________
    (3)   [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
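/* Worked example of the adjustment above (for illustration only, taking
   BITS_PER_WORD as 8 so the numbers stay small): if op0_low is 0xff and
   op1_low is 0x02, the desired unsigned product is 0x01fe, but a signed
   widening multiply computes -1 * 2 and yields 0xfffe.  Adding
   op1_low * 2**8 == 0x0200 and discarding the carry out of the double
   word restores 0x01fe.  The shift trick folds this correction into
   partial product (2b): op0_low >> 7 is 1, and adding that 1 to op0_high
   before forming op0_high * op1_low contributes exactly op1_low to the
   high word of the result.  */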
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
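/* A typical call (purely illustrative; the operands and target here are
   hypothetical pseudos):

     rtx sum = expand_binop (SImode, add_optab, x, y,
                             target, 0, OPTAB_LIB_WIDEN);

   The returned rtx is TARGET when the result could conveniently be
   generated there, and some other register or subexpression otherwise.  */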
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;
  bool first_pass_p = true;

  class = GET_MODE_CLASS (mode);
  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

 retry:
  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (swap_commutative_operands_with_target (target, op0, op1))
        {
          temp = op1;
          op1 = op0;
          op0 = temp;
        }
    }
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      enum machine_mode tmp_mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudos.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (binoptab == vec_pack_mod_optab
          || binoptab == vec_pack_usat_optab
          || binoptab == vec_pack_ssat_optab)
        {
          /* The mode of the result is different then the mode of the
             arguments.  */
          tmp_mode = insn_data[icode].operand[0].mode;
          if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
            return 0;
        }
      else
        tmp_mode = mode;

      if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
        temp = gen_reg_rtx (tmp_mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }
  /* If we were trying to rotate by a constant value, and that didn't
     work, try rotating the other direction before falling back to
     shifts and bitwise-or.  */
  if (first_pass_p
      && (binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
    {
      first_pass_p = false;
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
      goto retry;
    }
1422 takes operands of this mode and makes a wider mode. */
1424 if (binoptab
== smul_optab
1425 && GET_MODE_WIDER_MODE (mode
) != VOIDmode
1426 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1427 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1428 != CODE_FOR_nothing
))
1430 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1431 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1432 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1436 if (GET_MODE_CLASS (mode
) == MODE_INT
1437 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1438 GET_MODE_BITSIZE (GET_MODE (temp
))))
1439 return gen_lowpart (mode
, temp
);
1441 return convert_to_mode (mode
, temp
, unsignedp
);
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }
1913 Use a library call if one is available and caller says that's ok. */
1915 if (binoptab
->handlers
[(int) mode
].libfunc
1916 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1920 enum machine_mode op1_mode
= mode
;
1927 op1_mode
= word_mode
;
1928 /* Specify unsigned here,
1929 since negative shift counts are meaningless. */
1930 op1x
= convert_to_mode (word_mode
, op1
, 1);
1933 if (GET_MODE (op0
) != VOIDmode
1934 && GET_MODE (op0
) != mode
)
1935 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1937 /* Pass 1 for NO_QUEUE so we don't lose any increments
1938 if the libcall is cse'd or moved. */
1939 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1940 NULL_RTX
, LCT_CONST
, mode
, 2,
1941 op0
, mode
, op1x
, op1_mode
);
1943 insns
= get_insns ();
1946 target
= gen_reg_rtx (mode
);
1947 emit_libcall_block (insns
, target
, value
,
1948 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1953 delete_insns_since (last
);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT
                      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                                 GET_MODE_BITSIZE (wider_mode)))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp,
                       OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;
  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, methods);
  return 0;
}
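
/* A minimal usage sketch, not taken from the original sources: how a caller
   might expand an SImode division through sign_expand_binop, letting it pick
   the signed or unsigned optab and fall back on widening or a library call.
   The operands EXP_OP0/EXP_OP1, the helper name and the choice of SImode are
   assumptions made purely for illustration.  */

static rtx
expand_div_sketch (rtx exp_op0, rtx exp_op1, int unsignedp)
{
  /* udiv_optab and sdiv_optab are the unsigned/signed division optabs;
     OPTAB_LIB_WIDEN permits widening and a libcall as a last resort.  */
  return sign_expand_binop (SImode, udiv_optab, sdiv_optab,
                            exp_op0, exp_op1, NULL_RTX,
                            unsignedp, OPTAB_LIB_WIDEN);
}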
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                    NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
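
/* A minimal usage sketch, illustrative only: negating a pseudo in word_mode
   through the rtx-code wrapper above.  The operand VAL and the helper name
   are assumptions for illustration.  */

static rtx
negate_sketch (rtx val)
{
  /* NEG is mapped to neg_optab through code_to_optab by expand_simple_unop.  */
  return expand_simple_unop (word_mode, NEG, val, NULL_RTX, 0);
}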
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
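
/* An illustrative sketch of the same identity expand_parity relies on,
   parity (x) == (popcount (x) & 1), written directly with expand_unop and
   expand_binop.  The SImode operand X and the helper name are assumptions
   made for illustration.  */

static rtx
parity_by_popcount_sketch (rtx x)
{
  rtx pop = expand_unop (SImode, popcount_optab, x, NULL_RTX, true);
  if (pop == 0)
    return 0;
  /* Masking with 1 keeps only the low bit of the population count.  */
  return expand_binop (SImode, and_optab, pop, const1_rtx,
                       NULL_RTX, true, OPTAB_LIB_WIDEN);
}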
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_double_const (lo, hi, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_double_const (lo, hi, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
                           gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
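
/* An illustrative sketch of the mask computation used above: for a sign bit
   at position BITPOS, ABS is an AND with ~(1 << BITPOS) and NEG is an XOR
   with (1 << BITPOS).  For IEEE single precision (BITPOS == 31) the masks
   are 0x7fffffff and 0x80000000 respectively.  The helper name is an
   assumption, and the sketch assumes BITPOS < HOST_BITS_PER_WIDE_INT.  */

static HOST_WIDE_INT
signbit_mask_sketch (int bitpos, enum rtx_code code)
{
  HOST_WIDE_INT mask = (HOST_WIDE_INT) 1 << bitpos;
  /* ABS clears the sign bit, so it needs the complemented mask.  */
  return code == ABS ? ~mask : mask;
}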
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx last = get_last_insn ();
  rtx pat;

  class = GET_MODE_CLASS (mode);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
        return temp;
      else
        goto try_libcall;
    }

  /* We can't widen a bswap.  */
  if (unoptab == bswap_optab)
    goto try_libcall;

  if (CLASS_HAS_WIDER_MODES_P (class))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
          {
            rtx xop0 = op0;

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,
                                             copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == popcount_optab || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, outmode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)
            {
              rtx xop0 = op0;

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  */
              if (unoptab == clz_optab && temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
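
/* An illustrative sketch of the branch-free absolute value identity used
   above: with E = x >> (W-1) (an arithmetic shift), abs (x) == (x ^ E) - E.
   The SImode operand X and the helper name are assumptions made for
   illustration; the function above also handles the trapping variant.  */

static rtx
abs_by_shift_sketch (rtx x)
{
  rtx ext = expand_shift (RSHIFT_EXPR, SImode, x,
                          size_int (GET_MODE_BITSIZE (SImode) - 1),
                          NULL_RTX, 0);
  /* ext is all ones for negative x and zero otherwise.  */
  rtx t = expand_binop (SImode, xor_optab, ext, x, NULL_RTX, 0,
                        OPTAB_LIB_WIDEN);
  return expand_binop (SImode, sub_optab, t, ext, NULL_RTX, 0,
                       OPTAB_LIB_WIDEN);
}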
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source, provided it is a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word;
  rtx label;

  if (target == op1)
    target = NULL_RTX;

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      op1 = gen_lowpart (imode, op1);
    }
  else
    {
      imode = word_mode;
      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      op1 = operand_subword_force (op1, word, mode);
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  op1 = expand_binop (imode, and_optab, op1,
                      immed_double_const (lo, hi, imode),
                      NULL_RTX, 1, OPTAB_LIB_WIDEN);

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece = expand_binop (imode, and_optab, op0_piece,
                                          immed_double_const (~lo, ~hi, imode),
                                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_double_const (lo, hi, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_double_const (lo, hi, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_double_const (~lo, ~hi, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
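
/* An illustrative sketch of the bit arithmetic performed above:
   copysign (x, y) == (x & ~SIGN) | (y & SIGN), done on an integer view of
   the values.  The IMODE operands, the SIGN mask register and the helper
   name are assumptions made purely for illustration.  */

static rtx
copysign_mask_sketch (enum machine_mode imode, rtx ix, rtx iy, rtx sign)
{
  /* Magnitude bits of ix: clear the sign position.  */
  rtx mag = expand_binop (imode, and_optab, ix,
                          expand_unop (imode, one_cmpl_optab, sign,
                                       NULL_RTX, 1),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);
  /* Sign bit of iy.  */
  rtx sgn = expand_binop (imode, and_optab, iy, sign,
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);
  return expand_binop (imode, ior_optab, mag, sgn,
                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
}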
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
          || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
              && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
   Set P->must_stay if the currently examined clobber / store has to stay
   in the list of insns that constitute the actual no_conflict block /
   libcall block.  */
static void
no_conflict_move_test (rtx dest, rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list, or if it sets a result (presumably a hard
              register) that is set or clobbered by a previous insn.
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
              SET_DEST perform the former check on the address, and the latter
              check on the MEM.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_in_p (SET_DEST (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Encapsulate the block starting at FIRST and ending with LAST, which is
   logically equivalent to EQUIV, so it gets manipulated as a unit if it
   is possible to do so.  */

static void
maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
{
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
         encapsulated region would not be in one basic block, i.e. when
         there is a control_flow_insn_p insn between FIRST and LAST.  */
      bool attach_libcall_retval_notes = true;
      rtx insn, next = NEXT_INSN (last);

      for (insn = first; insn != next; insn = NEXT_INSN (insn))
        if (control_flow_insn_p (insn))
          {
            attach_libcall_retval_notes = false;
            break;
          }

      if (attach_libcall_retval_notes)
        {
          REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
                                                 REG_NOTES (first));
          REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
                                                REG_NOTES (last));
        }
    }
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
          || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
        return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
        {
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;
          else
            insns = next;

          if (next)
            PREV_INSN (next) = PREV_INSN (insn);

          add_insn (insn);
        }
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && REG_P (op1))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
                                              REG_NOTES (insn));

      if (op0 && REG_P (op0))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
                                              REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
        set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         alleged libcall value when found together with the REG_RETVAL
         note added below.  An existing note can come from an insn
         expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
              remove_note (insn, note);
          }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
        {
          rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

          if (note != 0)
            XEXP (note, 0) = constm1_rtx;
          else
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
                                                  REG_NOTES (insn));
        }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions of their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         libcall value when found together with the REG_RETVAL note added
         below.  An existing note can come from an insn expansion at
         "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }
      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = cmpmem_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstr_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstrn_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((GET_CODE (size) == CONST_INT
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *px = result;
          *py = const0_rtx;
          *pmode = result_mode;
          return;
        }

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
                                        result_mode, 3,
                                        XEXP (x, 0), Pmode,
                                        XEXP (y, 0), Pmode,
                                        size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
        libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
                                        word_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent LTU.  */
      *px = result;
      *pmode = word_mode;
      *py = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED)
        {
          if (*punsignedp)
            *px = plus_constant (result, 1);
          else
            *py = const0_rtx;
        }
      return;
    }

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
        {
          icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

          if (icode != CODE_FOR_nothing
              && insn_data[icode].operand[0].predicate (test, wider_mode))
            {
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
              return;
            }
        }

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      if (!CLASS_HAS_WIDER_MODES_P (class))
        break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
         is out of sync.  */
      gcc_assert (label);

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
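
/* A minimal usage sketch, illustrative only: branch to a label when an
   SImode pseudo is negative.  The operand VAL and the helper name are
   assumptions made for illustration.  */

static rtx
jump_if_negative_sketch (rtx val)
{
  rtx label = gen_label_rtx ();
  /* Signed comparison against zero; jump to LABEL when VAL < 0.  */
  emit_cmp_and_jump_insns (val, const0_rtx, LT, NULL_RTX,
                           SImode, 0, label);
  /* Code for the fall-through (non-negative) path would be emitted here.  */
  emit_label (label);
  return label;
}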
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
        break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
        {
          rtx tmp;
          tmp = x; x = y; y = tmp;
          comparison = swapped;
          break;
        }

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
          && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        {
          rtx true_rtx, false_rtx;

          switch (comparison)
            {
            case EQ:
              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;
              break;

            case NE:
              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;
              break;

            case GT:
              true_rtx = const1_rtx;
              false_rtx = const0_rtx;
              break;

            case GE:
              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;
              break;

            case LT:
              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;
              break;

            case LE:
              true_rtx = const0_rtx;
              false_rtx = const1_rtx;
              break;

            default:
              gcc_unreachable ();
            }
          equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                        equiv, true_rtx, false_rtx);
        }
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}
4069 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4072 emit_indirect_jump (rtx loc
)
4074 if (!insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
4076 loc
= copy_to_mode_reg (Pmode
, loc
);
4078 emit_jump_insn (gen_indirect_jump (loc
));
4082 #ifdef HAVE_conditional_move
4084 /* Emit a conditional move instruction if the machine supports one for that
4085 condition and machine mode.
4087 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4088 the mode to use should they be constants. If it is VOIDmode, they cannot
4091 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4092 should be stored there. MODE is the mode to use should they be constants.
4093 If it is VOIDmode, they cannot both be constants.
4095 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4096 is not supported. */
4099 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4100 enum machine_mode cmode
, rtx op2
, rtx op3
,
4101 enum machine_mode mode
, int unsignedp
)
4103 rtx tem
, subtarget
, comparison
, insn
;
4104 enum insn_code icode
;
4105 enum rtx_code reversed
;
4107 /* If one operand is constant, make it the second one. Only do this
4108 if the other operand is not constant as well. */
4110 if (swap_commutative_operands_p (op0
, op1
))
4115 code
= swap_condition (code
);
4118 /* get_condition will prefer to generate LT and GT even if the old
4119 comparison was against zero, so undo that canonicalization here since
4120 comparisons against zero are cheaper. */
4121 if (code
== LT
&& op1
== const1_rtx
)
4122 code
= LE
, op1
= const0_rtx
;
4123 else if (code
== GT
&& op1
== constm1_rtx
)
4124 code
= GE
, op1
= const0_rtx
;
4126 if (cmode
== VOIDmode
)
4127 cmode
= GET_MODE (op0
);
4129 if (swap_commutative_operands_p (op2
, op3
)
4130 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4139 if (mode
== VOIDmode
)
4140 mode
= GET_MODE (op2
);
4142 icode
= movcc_gen_code
[mode
];
4144 if (icode
== CODE_FOR_nothing
)
4148 target
= gen_reg_rtx (mode
);
4152 /* If the insn doesn't accept these operands, put them in pseudos. */
4154 if (!insn_data
[icode
].operand
[0].predicate
4155 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4156 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4158 if (!insn_data
[icode
].operand
[2].predicate
4159 (op2
, insn_data
[icode
].operand
[2].mode
))
4160 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4162 if (!insn_data
[icode
].operand
[3].predicate
4163 (op3
, insn_data
[icode
].operand
[3].mode
))
4164 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4166 /* Everything should now be in the suitable form, so emit the compare insn
4167 and then the conditional move. */
4170 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4172 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4173 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4174 return NULL and let the caller figure out how best to deal with this
4176 if (GET_CODE (comparison
) != code
)
4179 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4181 /* If that failed, then give up. */
4187 if (subtarget
!= target
)
4188 convert_move (target
, subtarget
, 0);
4193 /* Return nonzero if a conditional move of mode MODE is supported.
4195 This function is for combine so it can tell whether an insn that looks
4196 like a conditional move is actually supported by the hardware. If we
4197 guess wrong we lose a bit on optimization, but that's it. */
4198 /* ??? sparc64 supports conditionally moving integers values based on fp
4199 comparisons, and vice versa. How do we handle them? */
4202 can_conditionally_move_p (enum machine_mode mode
)
4204 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4210 #endif /* HAVE_conditional_move */
4212 /* Emit a conditional addition instruction if the machine supports one for that
4213 condition and machine mode.
4215 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4216 the mode to use should they be constants. If it is VOIDmode, they cannot
4219 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4220 should be stored there. MODE is the mode to use should they be constants.
4221 If it is VOIDmode, they cannot both be constants.
4223 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4224 is not supported. */
4227 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4228 enum machine_mode cmode
, rtx op2
, rtx op3
,
4229 enum machine_mode mode
, int unsignedp
)
4231 rtx tem
, subtarget
, comparison
, insn
;
4232 enum insn_code icode
;
4233 enum rtx_code reversed
;
4235 /* If one operand is constant, make it the second one. Only do this
4236 if the other operand is not constant as well. */
4238 if (swap_commutative_operands_p (op0
, op1
))
4243 code
= swap_condition (code
);
4246 /* get_condition will prefer to generate LT and GT even if the old
4247 comparison was against zero, so undo that canonicalization here since
4248 comparisons against zero are cheaper. */
4249 if (code
== LT
&& op1
== const1_rtx
)
4250 code
= LE
, op1
= const0_rtx
;
4251 else if (code
== GT
&& op1
== constm1_rtx
)
4252 code
= GE
, op1
= const0_rtx
;
4254 if (cmode
== VOIDmode
)
4255 cmode
= GET_MODE (op0
);
4257 if (swap_commutative_operands_p (op2
, op3
)
4258 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4267 if (mode
== VOIDmode
)
4268 mode
= GET_MODE (op2
);
4270 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4272 if (icode
== CODE_FOR_nothing
)
4276 target
= gen_reg_rtx (mode
);
4278 /* If the insn doesn't accept these operands, put them in pseudos. */
4280 if (!insn_data
[icode
].operand
[0].predicate
4281 (target
, insn_data
[icode
].operand
[0].mode
))
4282 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4286 if (!insn_data
[icode
].operand
[2].predicate
4287 (op2
, insn_data
[icode
].operand
[2].mode
))
4288 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4290 if (!insn_data
[icode
].operand
[3].predicate
4291 (op3
, insn_data
[icode
].operand
[3].mode
))
4292 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4294 /* Everything should now be in the suitable form, so emit the compare insn
4295 and then the conditional move. */
4298 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4300 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4301 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4302 return NULL and let the caller figure out how best to deal with this
4304 if (GET_CODE (comparison
) != code
)
4307 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4309 /* If that failed, then give up. */
4315 if (subtarget
!= target
)
4316 convert_move (target
, subtarget
, 0);
4321 /* These functions attempt to generate an insn body, rather than
4322 emitting the insn, but if the gen function already emits them, we
4323 make no attempt to turn them back into naked patterns. */
4325 /* Generate and return an insn body to add Y to X. */
4328 gen_add2_insn (rtx x
, rtx y
)
4330 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4332 gcc_assert (insn_data
[icode
].operand
[0].predicate
4333 (x
, insn_data
[icode
].operand
[0].mode
));
4334 gcc_assert (insn_data
[icode
].operand
[1].predicate
4335 (x
, insn_data
[icode
].operand
[1].mode
));
4336 gcc_assert (insn_data
[icode
].operand
[2].predicate
4337 (y
, insn_data
[icode
].operand
[2].mode
));
4339 return GEN_FCN (icode
) (x
, x
, y
);
4342 /* Generate and return an insn body to add r1 and c,
4343 storing the result in r0. */
4345 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4347 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4349 if (icode
== CODE_FOR_nothing
4350 || !(insn_data
[icode
].operand
[0].predicate
4351 (r0
, insn_data
[icode
].operand
[0].mode
))
4352 || !(insn_data
[icode
].operand
[1].predicate
4353 (r1
, insn_data
[icode
].operand
[1].mode
))
4354 || !(insn_data
[icode
].operand
[2].predicate
4355 (c
, insn_data
[icode
].operand
[2].mode
)))
4358 return GEN_FCN (icode
) (r0
, r1
, c
);
4362 have_add2_insn (rtx x
, rtx y
)
4366 gcc_assert (GET_MODE (x
) != VOIDmode
);
4368 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4370 if (icode
== CODE_FOR_nothing
)
4373 if (!(insn_data
[icode
].operand
[0].predicate
4374 (x
, insn_data
[icode
].operand
[0].mode
))
4375 || !(insn_data
[icode
].operand
[1].predicate
4376 (x
, insn_data
[icode
].operand
[1].mode
))
4377 || !(insn_data
[icode
].operand
[2].predicate
4378 (y
, insn_data
[icode
].operand
[2].mode
)))
4384 /* Generate and return an insn body to subtract Y from X. */
4387 gen_sub2_insn (rtx x
, rtx y
)
4389 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4391 gcc_assert (insn_data
[icode
].operand
[0].predicate
4392 (x
, insn_data
[icode
].operand
[0].mode
));
4393 gcc_assert (insn_data
[icode
].operand
[1].predicate
4394 (x
, insn_data
[icode
].operand
[1].mode
));
4395 gcc_assert (insn_data
[icode
].operand
[2].predicate
4396 (y
, insn_data
[icode
].operand
[2].mode
));
4398 return GEN_FCN (icode
) (x
, x
, y
);
4401 /* Generate and return an insn body to subtract r1 and c,
4402 storing the result in r0. */
4404 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4406 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4408 if (icode
== CODE_FOR_nothing
4409 || !(insn_data
[icode
].operand
[0].predicate
4410 (r0
, insn_data
[icode
].operand
[0].mode
))
4411 || !(insn_data
[icode
].operand
[1].predicate
4412 (r1
, insn_data
[icode
].operand
[1].mode
))
4413 || !(insn_data
[icode
].operand
[2].predicate
4414 (c
, insn_data
[icode
].operand
[2].mode
)))
4417 return GEN_FCN (icode
) (r0
, r1
, c
);
4421 have_sub2_insn (rtx x
, rtx y
)
4425 gcc_assert (GET_MODE (x
) != VOIDmode
);
4427 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4429 if (icode
== CODE_FOR_nothing
)
4432 if (!(insn_data
[icode
].operand
[0].predicate
4433 (x
, insn_data
[icode
].operand
[0].mode
))
4434 || !(insn_data
[icode
].operand
[1].predicate
4435 (x
, insn_data
[icode
].operand
[1].mode
))
4436 || !(insn_data
[icode
].operand
[2].predicate
4437 (y
, insn_data
[icode
].operand
[2].mode
)))
4443 /* Generate the body of an instruction to copy Y into X.
4444 It may be a list of insns, if one insn isn't enough. */
4447 gen_move_insn (rtx x
, rtx y
)
4452 emit_move_insn_1 (x
, y
);
4458 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4459 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4460 no such operation exists, CODE_FOR_nothing will be returned. */
4463 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4467 #ifdef HAVE_ptr_extend
4469 return CODE_FOR_ptr_extend
;
4472 tab
= unsignedp
? zext_optab
: sext_optab
;
4473 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4476 /* Generate the body of an insn to extend Y (with mode MFROM)
4477 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4480 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4481 enum machine_mode mfrom
, int unsignedp
)
4483 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4484 return GEN_FCN (icode
) (x
, y
);
4487 /* can_fix_p and can_float_p say whether the target machine
4488 can directly convert a given fixed point type to
4489 a given floating point type, or vice versa.
4490 The returned value is the CODE_FOR_... value to use,
4491 or CODE_FOR_nothing if these modes cannot be directly converted.
4493 *TRUNCP_PTR is set to 1 if it is necessary to output
4494 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4496 static enum insn_code
4497 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4498 int unsignedp
, int *truncp_ptr
)
4501 enum insn_code icode
;
4503 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4504 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4505 if (icode
!= CODE_FOR_nothing
)
4511 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4512 for this to work. We need to rework the fix* and ftrunc* patterns
4513 and documentation. */
4514 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4515 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4516 if (icode
!= CODE_FOR_nothing
4517 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4524 return CODE_FOR_nothing
;
4527 static enum insn_code
4528 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4533 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4534 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4537 /* Generate code to convert FROM to floating point
4538 and store in TO. FROM must be fixed point and not VOIDmode.
4539 UNSIGNEDP nonzero means regard FROM as unsigned.
4540 Normally this is done by correcting the final value
4541 if it is negative. */
4544 expand_float (rtx to
, rtx from
, int unsignedp
)
4546 enum insn_code icode
;
4548 enum machine_mode fmode
, imode
;
4549 bool can_do_signed
= false;
4551 /* Crash now, because we won't be able to decide which mode to use. */
4552 gcc_assert (GET_MODE (from
) != VOIDmode
);
4554 /* Look for an insn to do the conversion. Do it in the specified
4555 modes if possible; otherwise convert either input, output or both to
4556 wider mode. If the integer mode is wider than the mode of FROM,
4557 we can do the conversion signed even if the input is unsigned. */
4559 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4560 fmode
= GET_MODE_WIDER_MODE (fmode
))
4561 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4562 imode
= GET_MODE_WIDER_MODE (imode
))
4564 int doing_unsigned
= unsignedp
;
4566 if (fmode
!= GET_MODE (to
)
4567 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4570 icode
= can_float_p (fmode
, imode
, unsignedp
);
4571 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4573 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4574 if (scode
!= CODE_FOR_nothing
)
4575 can_do_signed
= true;
4576 if (imode
!= GET_MODE (from
))
4577 icode
= scode
, doing_unsigned
= 0;
4580 if (icode
!= CODE_FOR_nothing
)
4582 if (imode
!= GET_MODE (from
))
4583 from
= convert_to_mode (imode
, from
, unsignedp
);
4585 if (fmode
!= GET_MODE (to
))
4586 target
= gen_reg_rtx (fmode
);
4588 emit_unop_insn (icode
, target
, from
,
4589 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4592 convert_move (to
, target
, 0);
4597 /* Unsigned integer, and no way to convert directly. For binary
4598 floating point modes, convert as signed, then conditionally adjust
4600 if (unsignedp
&& can_do_signed
&& !DECIMAL_FLOAT_MODE_P (GET_MODE (to
)))
4602 rtx label
= gen_label_rtx ();
4604 REAL_VALUE_TYPE offset
;
4606 /* Look for a usable floating mode FMODE wider than the source and at
4607 least as wide as the target. Using FMODE will avoid rounding woes
4608 with unsigned values greater than the signed maximum value. */
4610 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4611 fmode
= GET_MODE_WIDER_MODE (fmode
))
4612 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4613 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4616 if (fmode
== VOIDmode
)
4618 /* There is no such mode. Pretend the target is wide enough. */
4619 fmode
= GET_MODE (to
);
4621 /* Avoid double-rounding when TO is narrower than FROM. */
4622 if ((significand_size (fmode
) + 1)
4623 < GET_MODE_BITSIZE (GET_MODE (from
)))
4626 rtx neglabel
= gen_label_rtx ();
4628 /* Don't use TARGET if it isn't a register, is a hard register,
4629 or is the wrong mode. */
4631 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4632 || GET_MODE (target
) != fmode
)
4633 target
= gen_reg_rtx (fmode
);
4635 imode
= GET_MODE (from
);
4636 do_pending_stack_adjust ();
4638 /* Test whether the sign bit is set. */
4639 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4642 /* The sign bit is not set. Convert as signed. */
4643 expand_float (target
, from
, 0);
4644 emit_jump_insn (gen_jump (label
));
4647 /* The sign bit is set.
4648 Convert to a usable (positive signed) value by shifting right
4649 one bit, while remembering if a nonzero bit was shifted
4650 out; i.e., compute (from & 1) | (from >> 1). */
4652 emit_label (neglabel
);
4653 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4654 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4655 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4657 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4659 expand_float (target
, temp
, 0);
4661 /* Multiply by 2 to undo the shift above. */
4662 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4663 target
, 0, OPTAB_LIB_WIDEN
);
4665 emit_move_insn (target
, temp
);
4667 do_pending_stack_adjust ();
4673 /* If we are about to do some arithmetic to correct for an
4674 unsigned operand, do it in a pseudo-register. */
4676 if (GET_MODE (to
) != fmode
4677 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4678 target
= gen_reg_rtx (fmode
);
4680 /* Convert as signed integer to floating. */
4681 expand_float (target
, from
, 0);
4683 /* If FROM is negative (and therefore TO is negative),
4684 correct its value by 2**bitwidth. */
4686 do_pending_stack_adjust ();
4687 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4691 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4692 temp
= expand_binop (fmode
, add_optab
, target
,
4693 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4694 target
, 0, OPTAB_LIB_WIDEN
);
4696 emit_move_insn (target
, temp
);
4698 do_pending_stack_adjust ();
4703 /* No hardware instruction available; call a library routine. */
4708 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4710 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4711 from
= convert_to_mode (SImode
, from
, unsignedp
);
4713 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4714 gcc_assert (libfunc
);
4718 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4719 GET_MODE (to
), 1, from
,
4721 insns
= get_insns ();
4724 emit_libcall_block (insns
, target
, value
,
4725 gen_rtx_FLOAT (GET_MODE (to
), from
));
4730 /* Copy result to requested destination
4731 if we have been computing in a temp location. */
4735 if (GET_MODE (target
) == GET_MODE (to
))
4736 emit_move_insn (to
, target
);
4738 convert_move (to
, target
, 0);
4742 /* Generate code to convert FROM to fixed point and store in TO. FROM
4743 must be floating point. */
4746 expand_fix (rtx to
, rtx from
, int unsignedp
)
4748 enum insn_code icode
;
4750 enum machine_mode fmode
, imode
;
4753 /* We first try to find a pair of modes, one real and one integer, at
4754 least as wide as FROM and TO, respectively, in which we can open-code
4755 this conversion. If the integer mode is wider than the mode of TO,
4756 we can do the conversion either signed or unsigned. */
4758 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4759 fmode
= GET_MODE_WIDER_MODE (fmode
))
4760 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4761 imode
= GET_MODE_WIDER_MODE (imode
))
4763 int doing_unsigned
= unsignedp
;
4765 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4766 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4767 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4769 if (icode
!= CODE_FOR_nothing
)
4771 if (fmode
!= GET_MODE (from
))
4772 from
= convert_to_mode (fmode
, from
, 0);
4776 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4777 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4781 if (imode
!= GET_MODE (to
))
4782 target
= gen_reg_rtx (imode
);
4784 emit_unop_insn (icode
, target
, from
,
4785 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4787 convert_move (to
, target
, unsignedp
);
4792 /* For an unsigned conversion, there is one more way to do it.
4793 If we have a signed conversion, we generate code that compares
4794 the real value to the largest representable positive number. If if
4795 is smaller, the conversion is done normally. Otherwise, subtract
4796 one plus the highest signed number, convert, and add it back.
4798 We only need to check all real modes, since we know we didn't find
4799 anything with a wider integer mode.
4801 This code used to extend FP value into mode wider than the destination.
4802 This is not needed. Consider, for instance conversion from SFmode
4805 The hot path through the code is dealing with inputs smaller than 2^63
4806 and doing just the conversion, so there is no bits to lose.
4808 In the other path we know the value is positive in the range 2^63..2^64-1
4809 inclusive. (as for other imput overflow happens and result is undefined)
4810 So we know that the most important bit set in mantissa corresponds to
4811 2^63. The subtraction of 2^63 should not generate any rounding as it
4812 simply clears out that bit. The rest is trivial. */
4814 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4815 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4816 fmode
= GET_MODE_WIDER_MODE (fmode
))
4817 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4821 REAL_VALUE_TYPE offset
;
4822 rtx limit
, lab1
, lab2
, insn
;
4824 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4825 real_2expN (&offset
, bitsize
- 1);
4826 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4827 lab1
= gen_label_rtx ();
4828 lab2
= gen_label_rtx ();
4830 if (fmode
!= GET_MODE (from
))
4831 from
= convert_to_mode (fmode
, from
, 0);
4833 /* See if we need to do the subtraction. */
4834 do_pending_stack_adjust ();
4835 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4838 /* If not, do the signed "fix" and branch around fixup code. */
4839 expand_fix (to
, from
, 0);
4840 emit_jump_insn (gen_jump (lab2
));
4843 /* Otherwise, subtract 2**(N-1), convert to signed number,
4844 then add 2**(N-1). Do the addition using XOR since this
4845 will often generate better code. */
4847 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4848 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4849 expand_fix (to
, target
, 0);
4850 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4852 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4854 to
, 1, OPTAB_LIB_WIDEN
);
4857 emit_move_insn (to
, target
);
4861 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4862 != CODE_FOR_nothing
)
4864 /* Make a place for a REG_NOTE and add it. */
4865 insn
= emit_move_insn (to
, to
);
4866 set_unique_reg_note (insn
,
4868 gen_rtx_fmt_e (UNSIGNED_FIX
,
4876 /* We can't do it with an insn, so use a library call. But first ensure
4877 that the mode of TO is at least as wide as SImode, since those are the
4878 only library calls we know about. */
4880 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4882 target
= gen_reg_rtx (SImode
);
4884 expand_fix (target
, from
, unsignedp
);
4892 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4893 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4894 gcc_assert (libfunc
);
4898 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4899 GET_MODE (to
), 1, from
,
4901 insns
= get_insns ();
4904 emit_libcall_block (insns
, target
, value
,
4905 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4906 GET_MODE (to
), from
));
4911 if (GET_MODE (to
) == GET_MODE (target
))
4912 emit_move_insn (to
, target
);
4914 convert_move (to
, target
, 0);
4918 /* Generate code to convert FROM to fixed point and store in TO. FROM
4919 must be floating point, TO must be signed. Use the conversion optab
4920 TAB to do the conversion. */
4923 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
4925 enum insn_code icode
;
4927 enum machine_mode fmode
, imode
;
4929 /* We first try to find a pair of modes, one real and one integer, at
4930 least as wide as FROM and TO, respectively, in which we can open-code
4931 this conversion. If the integer mode is wider than the mode of TO,
4932 we can do the conversion either signed or unsigned. */
4934 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4935 fmode
= GET_MODE_WIDER_MODE (fmode
))
4936 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4937 imode
= GET_MODE_WIDER_MODE (imode
))
4939 icode
= tab
->handlers
[imode
][fmode
].insn_code
;
4940 if (icode
!= CODE_FOR_nothing
)
4942 if (fmode
!= GET_MODE (from
))
4943 from
= convert_to_mode (fmode
, from
, 0);
4945 if (imode
!= GET_MODE (to
))
4946 target
= gen_reg_rtx (imode
);
4948 emit_unop_insn (icode
, target
, from
, UNKNOWN
);
4950 convert_move (to
, target
, 0);
4958 /* Report whether we have an instruction to perform the operation
4959 specified by CODE on operands of mode MODE. */
4961 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4963 return (code_to_optab
[(int) code
] != 0
4964 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4965 != CODE_FOR_nothing
));
4968 /* Create a blank optab. */
4973 optab op
= ggc_alloc (sizeof (struct optab
));
4974 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4976 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4977 op
->handlers
[i
].libfunc
= 0;
4983 static convert_optab
4984 new_convert_optab (void)
4987 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4988 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4989 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4991 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4992 op
->handlers
[i
][j
].libfunc
= 0;
4997 /* Same, but fill in its code as CODE, and write it into the
4998 code_to_optab table. */
5000 init_optab (enum rtx_code code
)
5002 optab op
= new_optab ();
5004 code_to_optab
[(int) code
] = op
;
5008 /* Same, but fill in its code as CODE, and do _not_ write it into
5009 the code_to_optab table. */
5011 init_optabv (enum rtx_code code
)
5013 optab op
= new_optab ();
5018 /* Conversion optabs never go in the code_to_optab table. */
5019 static inline convert_optab
5020 init_convert_optab (enum rtx_code code
)
5022 convert_optab op
= new_convert_optab ();
5027 /* Initialize the libfunc fields of an entire group of entries in some
5028 optab. Each entry is set equal to a string consisting of a leading
5029 pair of underscores followed by a generic operation name followed by
5030 a mode name (downshifted to lowercase) followed by a single character
5031 representing the number of operands for the given operation (which is
5032 usually one of the characters '2', '3', or '4').
5034 OPTABLE is the table in which libfunc fields are to be initialized.
5035 FIRST_MODE is the first machine mode index in the given optab to
5037 LAST_MODE is the last machine mode index in the given optab to
5039 OPNAME is the generic (string) name of the operation.
5040 SUFFIX is the character which specifies the number of operands for
5041 the given generic operation.
5045 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
5046 const char *opname
, int suffix
)
5049 unsigned opname_len
= strlen (opname
);
5051 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
5052 mode
= (enum machine_mode
) ((int) mode
+ 1))
5054 const char *mname
= GET_MODE_NAME (mode
);
5055 unsigned mname_len
= strlen (mname
);
5056 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5063 for (q
= opname
; *q
; )
5065 for (q
= mname
; *q
; q
++)
5066 *p
++ = TOLOWER (*q
);
5070 optable
->handlers
[(int) mode
].libfunc
5071 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5075 /* Initialize the libfunc fields of an entire group of entries in some
5076 optab which correspond to all integer mode operations. The parameters
5077 have the same meaning as similarly named ones for the `init_libfuncs'
5078 routine. (See above). */
5081 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
5083 int maxsize
= 2*BITS_PER_WORD
;
5084 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5085 maxsize
= LONG_LONG_TYPE_SIZE
;
5086 init_libfuncs (optable
, word_mode
,
5087 mode_for_size (maxsize
, MODE_INT
, 0),
5091 /* Initialize the libfunc fields of an entire group of entries in some
5092 optab which correspond to all real mode operations. The parameters
5093 have the same meaning as similarly named ones for the `init_libfuncs'
5094 routine. (See above). */
5097 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
5099 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
5100 init_libfuncs (optable
, MIN_MODE_DECIMAL_FLOAT
, MAX_MODE_DECIMAL_FLOAT
,
5104 /* Initialize the libfunc fields of an entire group of entries of an
5105 inter-mode-class conversion optab. The string formation rules are
5106 similar to the ones for init_libfuncs, above, but instead of having
5107 a mode name and an operand count these functions have two mode names
5108 and no operand count. */
5110 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5111 enum mode_class from_class
,
5112 enum mode_class to_class
)
5114 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
5115 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
5116 size_t opname_len
= strlen (opname
);
5117 size_t max_mname_len
= 0;
5119 enum machine_mode fmode
, tmode
;
5120 const char *fname
, *tname
;
5122 char *libfunc_name
, *suffix
;
5125 for (fmode
= first_from_mode
;
5127 fmode
= GET_MODE_WIDER_MODE (fmode
))
5128 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
5130 for (tmode
= first_to_mode
;
5132 tmode
= GET_MODE_WIDER_MODE (tmode
))
5133 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
5135 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5136 libfunc_name
[0] = '_';
5137 libfunc_name
[1] = '_';
5138 memcpy (&libfunc_name
[2], opname
, opname_len
);
5139 suffix
= libfunc_name
+ opname_len
+ 2;
5141 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
5142 fmode
= GET_MODE_WIDER_MODE (fmode
))
5143 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
5144 tmode
= GET_MODE_WIDER_MODE (tmode
))
5146 fname
= GET_MODE_NAME (fmode
);
5147 tname
= GET_MODE_NAME (tmode
);
5150 for (q
= fname
; *q
; p
++, q
++)
5152 for (q
= tname
; *q
; p
++, q
++)
5157 tab
->handlers
[tmode
][fmode
].libfunc
5158 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5163 /* Initialize the libfunc fields of an entire group of entries of an
5164 intra-mode-class conversion optab. The string formation rules are
5165 similar to the ones for init_libfunc, above. WIDENING says whether
5166 the optab goes from narrow to wide modes or vice versa. These functions
5167 have two mode names _and_ an operand count. */
5169 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5170 enum mode_class
class, bool widening
)
5172 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
5173 size_t opname_len
= strlen (opname
);
5174 size_t max_mname_len
= 0;
5176 enum machine_mode nmode
, wmode
;
5177 const char *nname
, *wname
;
5179 char *libfunc_name
, *suffix
;
5182 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5183 nmode
= GET_MODE_WIDER_MODE (nmode
))
5184 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
5186 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5187 libfunc_name
[0] = '_';
5188 libfunc_name
[1] = '_';
5189 memcpy (&libfunc_name
[2], opname
, opname_len
);
5190 suffix
= libfunc_name
+ opname_len
+ 2;
5192 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5193 nmode
= GET_MODE_WIDER_MODE (nmode
))
5194 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
5195 wmode
= GET_MODE_WIDER_MODE (wmode
))
5197 nname
= GET_MODE_NAME (nmode
);
5198 wname
= GET_MODE_NAME (wmode
);
5201 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
5203 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
5209 tab
->handlers
[widening
? wmode
: nmode
]
5210 [widening
? nmode
: wmode
].libfunc
5211 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5218 init_one_libfunc (const char *name
)
5222 /* Create a FUNCTION_DECL that can be passed to
5223 targetm.encode_section_info. */
5224 /* ??? We don't have any type information except for this is
5225 a function. Pretend this is "int foo()". */
5226 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5227 build_function_type (integer_type_node
, NULL_TREE
));
5228 DECL_ARTIFICIAL (decl
) = 1;
5229 DECL_EXTERNAL (decl
) = 1;
5230 TREE_PUBLIC (decl
) = 1;
5232 symbol
= XEXP (DECL_RTL (decl
), 0);
5234 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5235 are the flags assigned by targetm.encode_section_info. */
5236 SET_SYMBOL_REF_DECL (symbol
, 0);
5241 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5242 MODE to NAME, which should be either 0 or a string constant. */
5244 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
5247 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
5249 optable
->handlers
[mode
].libfunc
= 0;
5252 /* Call this to reset the function entry for one conversion optab
5253 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5254 either 0 or a string constant. */
5256 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
5257 enum machine_mode fmode
, const char *name
)
5260 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
5262 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
5265 /* Call this once to initialize the contents of the optabs
5266 appropriately for the current target machine. */
5273 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5275 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5276 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5278 #ifdef HAVE_conditional_move
5279 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5280 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5283 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5285 vcond_gen_code
[i
] = CODE_FOR_nothing
;
5286 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
5289 add_optab
= init_optab (PLUS
);
5290 addv_optab
= init_optabv (PLUS
);
5291 sub_optab
= init_optab (MINUS
);
5292 subv_optab
= init_optabv (MINUS
);
5293 smul_optab
= init_optab (MULT
);
5294 smulv_optab
= init_optabv (MULT
);
5295 smul_highpart_optab
= init_optab (UNKNOWN
);
5296 umul_highpart_optab
= init_optab (UNKNOWN
);
5297 smul_widen_optab
= init_optab (UNKNOWN
);
5298 umul_widen_optab
= init_optab (UNKNOWN
);
5299 usmul_widen_optab
= init_optab (UNKNOWN
);
5300 sdiv_optab
= init_optab (DIV
);
5301 sdivv_optab
= init_optabv (DIV
);
5302 sdivmod_optab
= init_optab (UNKNOWN
);
5303 udiv_optab
= init_optab (UDIV
);
5304 udivmod_optab
= init_optab (UNKNOWN
);
5305 smod_optab
= init_optab (MOD
);
5306 umod_optab
= init_optab (UMOD
);
5307 fmod_optab
= init_optab (UNKNOWN
);
5308 remainder_optab
= init_optab (UNKNOWN
);
5309 ftrunc_optab
= init_optab (UNKNOWN
);
5310 and_optab
= init_optab (AND
);
5311 ior_optab
= init_optab (IOR
);
5312 xor_optab
= init_optab (XOR
);
5313 ashl_optab
= init_optab (ASHIFT
);
5314 ashr_optab
= init_optab (ASHIFTRT
);
5315 lshr_optab
= init_optab (LSHIFTRT
);
5316 rotl_optab
= init_optab (ROTATE
);
5317 rotr_optab
= init_optab (ROTATERT
);
5318 smin_optab
= init_optab (SMIN
);
5319 smax_optab
= init_optab (SMAX
);
5320 umin_optab
= init_optab (UMIN
);
5321 umax_optab
= init_optab (UMAX
);
5322 pow_optab
= init_optab (UNKNOWN
);
5323 atan2_optab
= init_optab (UNKNOWN
);
5325 /* These three have codes assigned exclusively for the sake of
5327 mov_optab
= init_optab (SET
);
5328 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5329 cmp_optab
= init_optab (COMPARE
);
5331 ucmp_optab
= init_optab (UNKNOWN
);
5332 tst_optab
= init_optab (UNKNOWN
);
5334 eq_optab
= init_optab (EQ
);
5335 ne_optab
= init_optab (NE
);
5336 gt_optab
= init_optab (GT
);
5337 ge_optab
= init_optab (GE
);
5338 lt_optab
= init_optab (LT
);
5339 le_optab
= init_optab (LE
);
5340 unord_optab
= init_optab (UNORDERED
);
5342 neg_optab
= init_optab (NEG
);
5343 negv_optab
= init_optabv (NEG
);
5344 abs_optab
= init_optab (ABS
);
5345 absv_optab
= init_optabv (ABS
);
5346 addcc_optab
= init_optab (UNKNOWN
);
5347 one_cmpl_optab
= init_optab (NOT
);
5348 bswap_optab
= init_optab (BSWAP
);
5349 ffs_optab
= init_optab (FFS
);
5350 clz_optab
= init_optab (CLZ
);
5351 ctz_optab
= init_optab (CTZ
);
5352 popcount_optab
= init_optab (POPCOUNT
);
5353 parity_optab
= init_optab (PARITY
);
5354 sqrt_optab
= init_optab (SQRT
);
5355 floor_optab
= init_optab (UNKNOWN
);
5356 ceil_optab
= init_optab (UNKNOWN
);
5357 round_optab
= init_optab (UNKNOWN
);
5358 btrunc_optab
= init_optab (UNKNOWN
);
5359 nearbyint_optab
= init_optab (UNKNOWN
);
5360 rint_optab
= init_optab (UNKNOWN
);
5361 sincos_optab
= init_optab (UNKNOWN
);
5362 sin_optab
= init_optab (UNKNOWN
);
5363 asin_optab
= init_optab (UNKNOWN
);
5364 cos_optab
= init_optab (UNKNOWN
);
5365 acos_optab
= init_optab (UNKNOWN
);
5366 exp_optab
= init_optab (UNKNOWN
);
5367 exp10_optab
= init_optab (UNKNOWN
);
5368 exp2_optab
= init_optab (UNKNOWN
);
5369 expm1_optab
= init_optab (UNKNOWN
);
5370 ldexp_optab
= init_optab (UNKNOWN
);
5371 logb_optab
= init_optab (UNKNOWN
);
5372 ilogb_optab
= init_optab (UNKNOWN
);
5373 log_optab
= init_optab (UNKNOWN
);
5374 log10_optab
= init_optab (UNKNOWN
);
5375 log2_optab
= init_optab (UNKNOWN
);
5376 log1p_optab
= init_optab (UNKNOWN
);
5377 tan_optab
= init_optab (UNKNOWN
);
5378 atan_optab
= init_optab (UNKNOWN
);
5379 copysign_optab
= init_optab (UNKNOWN
);
5381 strlen_optab
= init_optab (UNKNOWN
);
5382 cbranch_optab
= init_optab (UNKNOWN
);
5383 cmov_optab
= init_optab (UNKNOWN
);
5384 cstore_optab
= init_optab (UNKNOWN
);
5385 push_optab
= init_optab (UNKNOWN
);
5387 reduc_smax_optab
= init_optab (UNKNOWN
);
5388 reduc_umax_optab
= init_optab (UNKNOWN
);
5389 reduc_smin_optab
= init_optab (UNKNOWN
);
5390 reduc_umin_optab
= init_optab (UNKNOWN
);
5391 reduc_splus_optab
= init_optab (UNKNOWN
);
5392 reduc_uplus_optab
= init_optab (UNKNOWN
);
5394 ssum_widen_optab
= init_optab (UNKNOWN
);
5395 usum_widen_optab
= init_optab (UNKNOWN
);
5396 sdot_prod_optab
= init_optab (UNKNOWN
);
5397 udot_prod_optab
= init_optab (UNKNOWN
);
5399 vec_extract_optab
= init_optab (UNKNOWN
);
5400 vec_extract_even_optab
= init_optab (UNKNOWN
);
5401 vec_extract_odd_optab
= init_optab (UNKNOWN
);
5402 vec_interleave_high_optab
= init_optab (UNKNOWN
);
5403 vec_interleave_low_optab
= init_optab (UNKNOWN
);
5404 vec_set_optab
= init_optab (UNKNOWN
);
5405 vec_init_optab
= init_optab (UNKNOWN
);
5406 vec_shl_optab
= init_optab (UNKNOWN
);
5407 vec_shr_optab
= init_optab (UNKNOWN
);
5408 vec_realign_load_optab
= init_optab (UNKNOWN
);
5409 movmisalign_optab
= init_optab (UNKNOWN
);
5410 vec_widen_umult_hi_optab
= init_optab (UNKNOWN
);
5411 vec_widen_umult_lo_optab
= init_optab (UNKNOWN
);
5412 vec_widen_smult_hi_optab
= init_optab (UNKNOWN
);
5413 vec_widen_smult_lo_optab
= init_optab (UNKNOWN
);
5414 vec_unpacks_hi_optab
= init_optab (UNKNOWN
);
5415 vec_unpacks_lo_optab
= init_optab (UNKNOWN
);
5416 vec_unpacku_hi_optab
= init_optab (UNKNOWN
);
5417 vec_unpacku_lo_optab
= init_optab (UNKNOWN
);
5418 vec_pack_mod_optab
= init_optab (UNKNOWN
);
5419 vec_pack_usat_optab
= init_optab (UNKNOWN
);
5420 vec_pack_ssat_optab
= init_optab (UNKNOWN
);
5422 powi_optab
= init_optab (UNKNOWN
);
5425 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5426 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5427 trunc_optab
= init_convert_optab (TRUNCATE
);
5428 sfix_optab
= init_convert_optab (FIX
);
5429 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5430 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5431 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5432 sfloat_optab
= init_convert_optab (FLOAT
);
5433 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5434 lrint_optab
= init_convert_optab (UNKNOWN
);
5435 lround_optab
= init_convert_optab (UNKNOWN
);
5436 lfloor_optab
= init_convert_optab (UNKNOWN
);
5437 lceil_optab
= init_convert_optab (UNKNOWN
);
5439 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5441 movmem_optab
[i
] = CODE_FOR_nothing
;
5442 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5443 cmpstrn_optab
[i
] = CODE_FOR_nothing
;
5444 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5445 setmem_optab
[i
] = CODE_FOR_nothing
;
5447 sync_add_optab
[i
] = CODE_FOR_nothing
;
5448 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5449 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5450 sync_and_optab
[i
] = CODE_FOR_nothing
;
5451 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5452 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5453 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5454 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5455 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5456 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5457 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5458 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5459 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5460 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5461 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5462 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5463 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5464 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5465 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5466 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5467 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5468 sync_lock_release
[i
] = CODE_FOR_nothing
;
5470 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5473 /* Fill in the optabs with the insns we support. */
5476 /* Initialize the optabs with the names of the library functions. */
5477 init_integral_libfuncs (add_optab
, "add", '3');
5478 init_floating_libfuncs (add_optab
, "add", '3');
5479 init_integral_libfuncs (addv_optab
, "addv", '3');
5480 init_floating_libfuncs (addv_optab
, "add", '3');
5481 init_integral_libfuncs (sub_optab
, "sub", '3');
5482 init_floating_libfuncs (sub_optab
, "sub", '3');
5483 init_integral_libfuncs (subv_optab
, "subv", '3');
5484 init_floating_libfuncs (subv_optab
, "sub", '3');
5485 init_integral_libfuncs (smul_optab
, "mul", '3');
5486 init_floating_libfuncs (smul_optab
, "mul", '3');
5487 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5488 init_floating_libfuncs (smulv_optab
, "mul", '3');
5489 init_integral_libfuncs (sdiv_optab
, "div", '3');
5490 init_floating_libfuncs (sdiv_optab
, "div", '3');
5491 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5492 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5493 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5494 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5495 init_integral_libfuncs (smod_optab
, "mod", '3');
5496 init_integral_libfuncs (umod_optab
, "umod", '3');
5497 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5498 init_integral_libfuncs (and_optab
, "and", '3');
5499 init_integral_libfuncs (ior_optab
, "ior", '3');
5500 init_integral_libfuncs (xor_optab
, "xor", '3');
5501 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5502 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5503 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5504 init_integral_libfuncs (smin_optab
, "min", '3');
5505 init_floating_libfuncs (smin_optab
, "min", '3');
5506 init_integral_libfuncs (smax_optab
, "max", '3');
5507 init_floating_libfuncs (smax_optab
, "max", '3');
5508 init_integral_libfuncs (umin_optab
, "umin", '3');
5509 init_integral_libfuncs (umax_optab
, "umax", '3');
5510 init_integral_libfuncs (neg_optab
, "neg", '2');
5511 init_floating_libfuncs (neg_optab
, "neg", '2');
5512 init_integral_libfuncs (negv_optab
, "negv", '2');
5513 init_floating_libfuncs (negv_optab
, "neg", '2');
5514 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5515 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5516 init_integral_libfuncs (clz_optab
, "clz", '2');
5517 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5518 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5519 init_integral_libfuncs (parity_optab
, "parity", '2');
5521 /* Comparison libcalls for integers MUST come in pairs,
5523 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5524 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5525 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5527 /* EQ etc are floating point only. */
5528 init_floating_libfuncs (eq_optab
, "eq", '2');
5529 init_floating_libfuncs (ne_optab
, "ne", '2');
5530 init_floating_libfuncs (gt_optab
, "gt", '2');
5531 init_floating_libfuncs (ge_optab
, "ge", '2');
5532 init_floating_libfuncs (lt_optab
, "lt", '2');
5533 init_floating_libfuncs (le_optab
, "le", '2');
5534 init_floating_libfuncs (unord_optab
, "unord", '2');
5536 init_floating_libfuncs (powi_optab
, "powi", '2');
5539 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5540 MODE_INT
, MODE_FLOAT
);
5541 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5542 MODE_INT
, MODE_DECIMAL_FLOAT
);
5543 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5544 MODE_INT
, MODE_FLOAT
);
5545 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5546 MODE_INT
, MODE_DECIMAL_FLOAT
);
5547 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5548 MODE_FLOAT
, MODE_INT
);
5549 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5550 MODE_DECIMAL_FLOAT
, MODE_INT
);
5551 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5552 MODE_FLOAT
, MODE_INT
);
5553 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5554 MODE_DECIMAL_FLOAT
, MODE_INT
);
5555 init_interclass_conv_libfuncs (ufloat_optab
, "floatuns",
5556 MODE_INT
, MODE_DECIMAL_FLOAT
);
5557 init_interclass_conv_libfuncs (lrint_optab
, "lrint",
5558 MODE_INT
, MODE_FLOAT
);
5559 init_interclass_conv_libfuncs (lround_optab
, "lround",
5560 MODE_INT
, MODE_FLOAT
);
5561 init_interclass_conv_libfuncs (lfloor_optab
, "lfloor",
5562 MODE_INT
, MODE_FLOAT
);
5563 init_interclass_conv_libfuncs (lceil_optab
, "lceil",
5564 MODE_INT
, MODE_FLOAT
);
5566 /* sext_optab is also used for FLOAT_EXTEND. */
5567 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5568 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, true);
5569 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5570 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5571 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5572 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, false);
5573 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5574 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5576 /* Explicitly initialize the bswap libfuncs since we need them to be
5577 valid for things other than word_mode. */
5578 set_optab_libfunc (bswap_optab
, SImode
, "__bswapsi2");
5579 set_optab_libfunc (bswap_optab
, DImode
, "__bswapdi2");
5581 /* Use cabs for double complex abs, since systems generally have cabs.
5582 Don't define any libcall for float complex, so that cabs will be used. */
5583 if (complex_double_type_node
)
5584 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5585 = init_one_libfunc ("cabs");
5587 /* The ffs function operates on `int'. */
5588 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5589 = init_one_libfunc ("ffs");
5591 abort_libfunc
= init_one_libfunc ("abort");
5592 memcpy_libfunc
= init_one_libfunc ("memcpy");
5593 memmove_libfunc
= init_one_libfunc ("memmove");
5594 memcmp_libfunc
= init_one_libfunc ("memcmp");
5595 memset_libfunc
= init_one_libfunc ("memset");
5596 setbits_libfunc
= init_one_libfunc ("__setbits");
5598 #ifndef DONT_USE_BUILTIN_SETJMP
5599 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5600 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5602 setjmp_libfunc
= init_one_libfunc ("setjmp");
5603 longjmp_libfunc
= init_one_libfunc ("longjmp");
5605 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5606 unwind_sjlj_unregister_libfunc
5607 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5609 /* For function entry/exit instrumentation. */
5610 profile_function_entry_libfunc
5611 = init_one_libfunc ("__cyg_profile_func_enter");
5612 profile_function_exit_libfunc
5613 = init_one_libfunc ("__cyg_profile_func_exit");
5615 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5617 if (HAVE_conditional_trap
)
5618 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5620 /* Allow the target to add more libcalls or rename some, etc. */
5621 targetm
.init_libfuncs ();
5626 /* Print information about the current contents of the optabs on
5630 debug_optab_libfuncs (void)
5636 /* Dump the arithmetic optabs. */
5637 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5638 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5641 struct optab_handlers
*h
;
5644 h
= &o
->handlers
[j
];
5647 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5648 fprintf (stderr
, "%s\t%s:\t%s\n",
5649 GET_RTX_NAME (o
->code
),
5651 XSTR (h
->libfunc
, 0));
5655 /* Dump the conversion optabs. */
5656 for (i
= 0; i
< (int) COI_MAX
; ++i
)
5657 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5658 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5661 struct optab_handlers
*h
;
5663 o
= &convert_optab_table
[i
];
5664 h
= &o
->handlers
[j
][k
];
5667 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5668 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5669 GET_RTX_NAME (o
->code
),
5672 XSTR (h
->libfunc
, 0));
5680 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5681 CODE. Return 0 on failure. */
5684 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5685 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5687 enum machine_mode mode
= GET_MODE (op1
);
5688 enum insn_code icode
;
5691 if (!HAVE_conditional_trap
)
5694 if (mode
== VOIDmode
)
5697 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5698 if (icode
== CODE_FOR_nothing
)
5702 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5703 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5709 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5711 PUT_CODE (trap_rtx
, code
);
5712 gcc_assert (HAVE_conditional_trap
);
5713 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5717 insn
= get_insns ();
5724 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5725 or unsigned operation code. */
5727 static enum rtx_code
5728 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5740 code
= unsignedp
? LTU
: LT
;
5743 code
= unsignedp
? LEU
: LE
;
5746 code
= unsignedp
? GTU
: GT
;
5749 code
= unsignedp
? GEU
: GE
;
5752 case UNORDERED_EXPR
:
5783 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5784 unsigned operators. Do not generate compare instruction. */
5787 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5789 enum rtx_code rcode
;
5791 rtx rtx_op0
, rtx_op1
;
5793 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5794 ensures that condition is a relational operation. */
5795 gcc_assert (COMPARISON_CLASS_P (cond
));
5797 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5798 t_op0
= TREE_OPERAND (cond
, 0);
5799 t_op1
= TREE_OPERAND (cond
, 1);
5801 /* Expand operands. */
5802 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5803 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5805 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5806 && GET_MODE (rtx_op0
) != VOIDmode
)
5807 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5809 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5810 && GET_MODE (rtx_op1
) != VOIDmode
)
5811 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5813 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5816 /* Return insn code for VEC_COND_EXPR EXPR. */
5818 static inline enum insn_code
5819 get_vcond_icode (tree expr
, enum machine_mode mode
)
5821 enum insn_code icode
= CODE_FOR_nothing
;
5823 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5824 icode
= vcondu_gen_code
[mode
];
5826 icode
= vcond_gen_code
[mode
];
5830 /* Return TRUE iff, appropriate vector insns are available
5831 for vector cond expr expr in VMODE mode. */
5834 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5836 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5841 /* Generate insns for VEC_COND_EXPR. */
5844 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5846 enum insn_code icode
;
5847 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5848 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5849 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5851 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5852 if (icode
== CODE_FOR_nothing
)
5855 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5856 target
= gen_reg_rtx (mode
);
5858 /* Get comparison rtx. First expand both cond expr operands. */
5859 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5861 cc_op0
= XEXP (comparison
, 0);
5862 cc_op1
= XEXP (comparison
, 1);
5863 /* Expand both operands and force them in reg, if required. */
5864 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5865 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5866 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
5867 && mode
!= VOIDmode
)
5868 rtx_op1
= force_reg (mode
, rtx_op1
);
5870 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5871 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5872 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
5873 && mode
!= VOIDmode
)
5874 rtx_op2
= force_reg (mode
, rtx_op2
);
5876 /* Emit instruction! */
5877 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5878 comparison
, cc_op0
, cc_op1
));
5884 /* This is an internal subroutine of the other compare_and_swap expanders.
5885 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5886 operation. TARGET is an optional place to store the value result of
5887 the operation. ICODE is the particular instruction to expand. Return
5888 the result of the operation. */
5891 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5892 rtx target
, enum insn_code icode
)
5894 enum machine_mode mode
= GET_MODE (mem
);
5897 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5898 target
= gen_reg_rtx (mode
);
5900 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5901 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5902 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5903 old_val
= force_reg (mode
, old_val
);
5905 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5906 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5907 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5908 new_val
= force_reg (mode
, new_val
);
5910 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5911 if (insn
== NULL_RTX
)
5918 /* Expand a compare-and-swap operation and return its value. */
5921 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5923 enum machine_mode mode
= GET_MODE (mem
);
5924 enum insn_code icode
= sync_compare_and_swap
[mode
];
5926 if (icode
== CODE_FOR_nothing
)
5929 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike the other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
        break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return NULL_RTX;

      /* Ensure that if old_val == mem, we're not comparing
         against an old value.  */
      if (MEM_P (old_val))
        old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
        return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
        {
          enum machine_mode cmode = insn_data[icode].operand[0].mode;
          rtx insn;

          subtarget = target;
          if (!insn_data[icode].operand[0].predicate (target, cmode))
            subtarget = gen_reg_rtx (cmode);

          insn = GEN_FCN (icode) (subtarget);
          if (insn)
            {
              emit_insn (insn);
              if (GET_MODE (target) != GET_MODE (subtarget))
                {
                  convert_move (target, subtarget, 1);
                  subtarget = target;
                }
              return subtarget;
            }
        }
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by
     ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
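
/* A usage sketch, assuming the usual routing from the __sync builtins:
   the boolean form of the builtin, e.g.

     if (__sync_bool_compare_and_swap (&lock, 0, 1))
       enter_critical_section ();

   is expanded by the routine above.  When the target only provides the
   value-returning pattern, the 0/1 result is materialized either with a
   setcc instruction (when STORE_FLAG_VALUE is 1) or with the two-label
   branch sequence, relying on later if-conversion to clean the branches
   up.  enter_critical_section is a hypothetical function used only for
   illustration.  */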
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        cmp_reg = compare-and-swap(mem, old_reg, new_reg)
        if (cmp_reg != old_reg)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget != NULL_RTX)
        {
          gcc_assert (subtarget == cmp_reg);
          break;
        }

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget == NULL_RTX)
        return false;
      if (subtarget != cmp_reg)
        emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
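
/* A minimal sketch of how the sync expanders below drive this helper,
   assuming PLUS as the example operation:

     rtx t0 = gen_reg_rtx (mode), t1, seq;

     start_sequence ();
     t1 = expand_simple_binop (mode, PLUS, t0, val, NULL_RTX,
                               true, OPTAB_LIB_WIDEN);
     seq = get_insns ();
     end_sequence ();

     if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, seq))
       return const0_rtx;

   Here t0 plays the role of OLD_REG and t1 of NEW_REG; the recorded
   sequence becomes the body of the generated retry loop.  */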
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing)
        {
          /* If there is no atomic subtraction, rewrite MEM -= VAL
             as MEM += -VAL.  */
          icode = sync_add_optab[mode];
          if (icode != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
        {
          emit_insn (insn);
          return const0_rtx;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return const0_rtx;
    }

  return NULL_RTX;
}
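
/* A usage sketch, assuming the builtin expanders call this routine only
   when the builtin's result is ignored, e.g.

     __sync_fetch_and_sub (&counter, n);

   with the returned value unused.  If the target lacks a sync_sub
   pattern but has sync_add, the MINUS case above rewrites the request
   as an addition of -n (expand_simple_unop with NEG), which is
   equivalent when the result is discarded.  The builtin is GCC's
   documented __sync interface.  */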
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
                             bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
        {
          /* If there is no atomic subtraction, rewrite MEM -= VAL
             as MEM += -VAL.  */
          old_code = sync_old_add_optab[mode];
          new_code = sync_new_add_optab[mode];
          if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target supports the proper new/old operation, great.  But if
     we only support the opposite old/new operation, check to see if we
     can compensate.  When the old value is supported, we can always
     perform the operation again with normal arithmetic.  When only the
     new value is supported, we can compensate only if the operation is
     reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
        {
          icode = old_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
          && (code == PLUS || code == MINUS || code == XOR))
        {
          icode = new_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);

          /* If we need to compensate for using an operation with the
             wrong return value, do so now.  */
          if (compensate)
            {
              if (!after)
                {
                  if (code == PLUS)
                    code = MINUS;
                  else if (code == MINUS)
                    code = PLUS;
                }

              if (code == NOT)
                target = expand_simple_unop (mode, NOT, target,
                                             NULL_RTX, true);
              target = expand_simple_binop (mode, code, target, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
            }

          return target;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
        emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);
      if (after)
        emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
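
/* A sketch of the compensation performed above, which recovers one
   flavour of result from the other.  With only an "old value" pattern
   but a request for the value after the operation (e.g.
   __sync_add_and_fetch), the old-value insn is emitted and the
   arithmetic is redone on the result:

     new = old + val;

   Conversely, with only a "new value" pattern and a request for the
   value before the operation, the inverse operation is applied, which
   is why that direction is restricted to reversible codes:

     old = new - val;       (PLUS compensated with MINUS)
     old = new ^ val;       (XOR is its own inverse)

   The builtin named above is GCC's documented __sync interface.  */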
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in that case the return value is still 0/1, but the exact value stored
   in MEM is target-defined.  TARGET is an optional place to store the
   return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);
          return target;
        }
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
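
/* A usage sketch, assuming the usual routing from the __sync builtins:
   the exchange builtin used for simple spin locks, e.g.

     while (__sync_lock_test_and_set (&lock, 1))
       continue;

   loops until the previous value of the lock word was 0.  If the target
   has no sync_lock_test_and_set pattern, the compare-and-swap loop above
   performs a full exchange instead, and the result is still the previous
   contents of MEM.  */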
#include "gt-optabs.h"