/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */
rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */
enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */
enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                            enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                            enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
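
/* Example (illustrative): if an SImode AND is widened to DImode, only the
   low 32 bits of the DImode result are ever used, so the widened operands
   may carry garbage in their high bits and NO_EXTEND can be nonzero.  For a
   right shift the high-order bits are shifted down into the low part, so
   the operand must really be sign- or zero-extended.  */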
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */
  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
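
/* Example (illustrative): for a signed integral TYPE compiled with -ftrapv,
   optab_for_tree_code (PLUS_EXPR, type) yields addv_optab, while for an
   unsigned or non-trapping type it yields add_optab; see the trapv handling
   below.  */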
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;

  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case TRUNC_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_PACK_MOD_EXPR:
      return vec_pack_mod_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    default:
      break;
    }

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                  nops  OP0     OP1     WIDE_OP
   widening-sum                   2     oprnd0  -       oprnd1
   widening-dot-product           3     oprnd0  oprnd1  oprnd2
   widening-mult                  2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)    1     oprnd0  -       -   */
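
/* Example (illustrative): a dot-product whose inputs are V8HImode vectors
   and whose accumulator is a V4SImode vector is a three-operand case: OP0
   and OP1 hold the two V8HImode inputs and WIDE_OP holds the V4SImode
   accumulator, matching the widening-dot-product row above.  */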
rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_CODE_LENGTH (TREE_CODE (exp));

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
        && xmode1 != VOIDmode)
      xop1 = copy_to_mode_reg (xmode1, xop1);

  if (wide_op && nops == 3)
    if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
        && wxmode != VOIDmode)
      wxop = copy_to_mode_reg (wxmode, wxop);

  if (nops == 3)
    pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
  else if (nops == 2)
    {
      if (!wide_op)
        pat = GEN_FCN (icode) (temp, xop0, xop1);
      else
        {
          if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, wxop);
        }
    }
  else
    pat = GEN_FCN (icode) (temp, xop0);

  emit_insn (pat);
  return temp;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
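
/* Example (illustrative): adding two CONST_INTs in SImode folds through
   simplify_binary_operation to a new CONST_INT, so no insns are emitted;
   only when folding fails do we fall through to expand_binop.  */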
rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
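
/* Example (illustrative): for a 64-bit arithmetic right shift on a
   32-bit-word target with a count known to be >= 32, INTO_TARGET (the low
   result word) is OUTOF_INPUT (the high input word) shifted by
   SUPERWORD_OP1, while OUTOF_TARGET (the high result word) is filled with
   copies of the sign bit via a shift by BITS_PER_WORD - 1; for ashl and
   lshr it is simply set to zero.  */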
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
      if (tmp == 0)
        return false;
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
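
      /* Illustrative check of the ~OP1 trick: with BITS_PER_WORD == 32 and
         OP1 == 5 we want to move the carried bits by 32 - 5 == 27.  The
         code below first shifts by 1 and then by ~OP1, and ~5 truncated to
         the shift width is 26, so the total is 1 + 26 == 27 as required.  */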
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }

  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
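
/* Worked example (illustrative): with 32-bit words and a left shift of the
   doubleword {hi, lo} by OP1, the subword case (OP1 < 32) computes
   OUTOF_TARGET = lo << OP1 and INTO_TARGET = (hi << OP1) | (lo >> (32 - OP1)),
   while the superword case (OP1 >= 32) computes OUTOF_TARGET = 0 and
   INTO_TARGET = lo << (OP1 - 32).  */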
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:

                                 _______________________
                                [__op0_high_|__op0_low__]
                                 _______________________
        *                       [__op1_high_|__op1_low__]
        _______________________________________________
                                 _______________________
    (1)                         [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
          _______________________
    (3)  [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the -1.  */
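
/* Numeric example of the adjustment (illustrative), with BITS_PER_WORD == 8:
   take op0_low = 0x80 (128 unsigned, -128 signed) and op1_low = 3.  The
   unsigned widening product is 384 = 0x0180, but a signed widening multiply
   yields -384 = 0xFE80; the difference is 0x0300 = op1_low * 2**8.  Adding
   (op0_low >> 7) = 1 to op0_high before forming partial product (2b)
   contributes exactly op1_low * 2**8, restoring the unsigned result.  */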
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
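
/* Typical use (illustrative): most callers expand an addition with
   something like
     expand_binop (SImode, add_optab, x, y, target, 0, OPTAB_LIB_WIDEN);
   letting this function pick a direct insn, a widened operation, or a
   library call, and returning an rtx that may or may not be TARGET.  */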
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;
  bool first_pass_p = true;

  class = GET_MODE_CLASS (mode);
1254 the negated constant. */
1256 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
1258 op1
= negate_rtx (mode
, op1
);
1259 binoptab
= add_optab
;
1262 /* If we are inside an appropriately-short loop and we are optimizing,
1263 force expensive constants into a register. */
1264 if (CONSTANT_P (op0
) && optimize
1265 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1267 if (GET_MODE (op0
) != VOIDmode
)
1268 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
1269 op0
= force_reg (mode
, op0
);
1272 if (CONSTANT_P (op1
) && optimize
1273 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1275 if (GET_MODE (op1
) != VOIDmode
)
1276 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1277 op1
= force_reg (mode
, op1
);
1280 /* Record where to delete back to if we backtrack. */
1281 last
= get_last_insn ();
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (swap_commutative_operands_with_target (target, op0, op1))
        {
          temp = op1;
          op1 = op0;
          op0 = temp;
        }
    }
 retry:

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      enum machine_mode tmp_mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudos.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (binoptab == vec_pack_mod_optab
          || binoptab == vec_pack_usat_optab
          || binoptab == vec_pack_ssat_optab)
        {
          /* The mode of the result is different from the mode of the
             operands.  */
          tmp_mode = insn_data[icode].operand[0].mode;
          if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
            return 0;
        }
      else
        tmp_mode = mode;

      if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
        temp = gen_reg_rtx (tmp_mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }
  /* If we were trying to rotate by a constant value, and that didn't
     work, try rotating the other direction before falling back to
     shifts and bitwise-or.  */
  if (first_pass_p
      && (binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
    {
      first_pass_p = false;
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
      goto retry;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
      else
        delete_insns_since (last);
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */

          emit_insn (insns);
          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
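  /* Illustrative sketch: a DImode add on a 32-bit-word target becomes
       lo = lo0 + lo1;  carry = (lo < lo0);  hi = hi0 + hi1 + carry;
     and the loop below does this one word at a time, using
     emit_store_flag_force to materialize the carry and ior-ing together the
     two possible carries from the main add and from adding the incoming
     carry.  */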
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If
         STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
         it is the one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (binoptab == ashl_optab || binoptab == ashr_optab
	  || binoptab == lshr_optab || binoptab == rotl_optab
	  || binoptab == rotr_optab)
	{
	  op1_mode = word_mode;
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (word_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
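  /* Example, for illustration only: when no insn pattern can handle the
     operation directly and METHODS permits a library call, the block above
     emits a call to the optab's libfunc and wraps it with emit_libcall_block,
     so the call carries a REG_EQUAL note of the form (code:MODE op0 op1) and
     later passes may simplify or delete it like the open-coded operation.  */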
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((binoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && binoptab->handlers[(int) wider_mode].libfunc))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (class != MODE_INT
		      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
						 GET_MODE_BITSIZE (wider_mode)))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
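/* Worked example of the widening path above (illustrative, not from the
   original sources): a QImode addition on a target that only provides an
   SImode add pattern is expanded roughly as

     temp = expand_binop (SImode, add_optab, xop0, xop1, NULL_RTX,
			  unsignedp, OPTAB_DIRECT);
     return gen_lowpart (QImode, temp);

   with no real extension emitted for the operands, because only the low
   QImode bits of the SImode sum are ever used.  */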
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
		       unsignedp, methods);
}
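/* Usage sketch (hypothetical call, not taken from this file): expanding an
   unsigned HImode minimum could be written as

     temp = sign_expand_binop (HImode, umin_optab, smin_optab,
			       op0, op1, target, 1, OPTAB_LIB_WIDEN);

   The direct unsigned pattern is tried first; if only a wider signed pattern
   exists it is still usable, because zero-extended operands are non-negative
   and the low-order bits of the result are the same either way.  */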
2084 /* Generate code to perform an operation specified by UNOPPTAB
2085 on operand OP0, with two results to TARG0 and TARG1.
2086 We assume that the order of the operands for the instruction
2087 is TARG0, TARG1, OP0.
2089 Either TARG0 or TARG1 may be zero, but what that means is that
2090 the result is not actually wanted. We will generate it into
2091 a dummy pseudo-reg and discard it. They may not both be zero.
2093 Returns 1 if this operation can be performed; 0 if not. */
2096 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2099 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2100 enum mode_class
class;
2101 enum machine_mode wider_mode
;
2102 rtx entry_last
= get_last_insn ();
2105 class = GET_MODE_CLASS (mode
);
2108 targ0
= gen_reg_rtx (mode
);
2110 targ1
= gen_reg_rtx (mode
);
2112 /* Record where to go back to if we fail. */
2113 last
= get_last_insn ();
2115 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2117 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2118 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
2122 if (GET_MODE (xop0
) != VOIDmode
2123 && GET_MODE (xop0
) != mode0
)
2124 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2126 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2127 if (!insn_data
[icode
].operand
[2].predicate (xop0
, mode0
))
2128 xop0
= copy_to_mode_reg (mode0
, xop0
);
2130 /* We could handle this, but we should always be called with a pseudo
2131 for our targets and all insns should take them as outputs. */
2132 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2133 gcc_assert (insn_data
[icode
].operand
[1].predicate (targ1
, mode
));
2135 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
2142 delete_insns_since (last
);
2145 /* It can't be done in this mode. Can we do it in a wider mode? */
2147 if (CLASS_HAS_WIDER_MODES_P (class))
2149 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2150 wider_mode
!= VOIDmode
;
2151 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2153 if (unoptab
->handlers
[(int) wider_mode
].insn_code
2154 != CODE_FOR_nothing
)
2156 rtx t0
= gen_reg_rtx (wider_mode
);
2157 rtx t1
= gen_reg_rtx (wider_mode
);
2158 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2160 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2162 convert_move (targ0
, t0
, unsignedp
);
2163 convert_move (targ1
, t1
, unsignedp
);
2167 delete_insns_since (last
);
2172 delete_insns_since (entry_last
);
2176 /* Generate code to perform an operation specified by BINOPTAB
2177 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2178 We assume that the order of the operands for the instruction
2179 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2180 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2182 Either TARG0 or TARG1 may be zero, but what that means is that
2183 the result is not actually wanted. We will generate it into
2184 a dummy pseudo-reg and discard it. They may not both be zero.
2186 Returns 1 if this operation can be performed; 0 if not. */
2189 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2192 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2193 enum mode_class
class;
2194 enum machine_mode wider_mode
;
2195 rtx entry_last
= get_last_insn ();
2198 class = GET_MODE_CLASS (mode
);
2200 /* If we are inside an appropriately-short loop and we are optimizing,
2201 force expensive constants into a register. */
2202 if (CONSTANT_P (op0
) && optimize
2203 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2204 op0
= force_reg (mode
, op0
);
2206 if (CONSTANT_P (op1
) && optimize
2207 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2208 op1
= force_reg (mode
, op1
);
2211 targ0
= gen_reg_rtx (mode
);
2213 targ1
= gen_reg_rtx (mode
);
2215 /* Record where to go back to if we fail. */
2216 last
= get_last_insn ();
2218 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2220 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2221 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2222 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2224 rtx xop0
= op0
, xop1
= op1
;
2226 /* In case the insn wants input operands in modes different from
2227 those of the actual operands, convert the operands. It would
2228 seem that we don't need to convert CONST_INTs, but we do, so
2229 that they're properly zero-extended, sign-extended or truncated
2232 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2233 xop0
= convert_modes (mode0
,
2234 GET_MODE (op0
) != VOIDmode
2239 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2240 xop1
= convert_modes (mode1
,
2241 GET_MODE (op1
) != VOIDmode
2246 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2247 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2248 xop0
= copy_to_mode_reg (mode0
, xop0
);
2250 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
))
2251 xop1
= copy_to_mode_reg (mode1
, xop1
);
2253 /* We could handle this, but we should always be called with a pseudo
2254 for our targets and all insns should take them as outputs. */
2255 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2256 gcc_assert (insn_data
[icode
].operand
[3].predicate (targ1
, mode
));
2258 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2265 delete_insns_since (last
);
2268 /* It can't be done in this mode. Can we do it in a wider mode? */
2270 if (CLASS_HAS_WIDER_MODES_P (class))
2272 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2273 wider_mode
!= VOIDmode
;
2274 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2276 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2277 != CODE_FOR_nothing
)
2279 rtx t0
= gen_reg_rtx (wider_mode
);
2280 rtx t1
= gen_reg_rtx (wider_mode
);
2281 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2282 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2284 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2287 convert_move (targ0
, t0
, unsignedp
);
2288 convert_move (targ1
, t1
, unsignedp
);
2292 delete_insns_since (last
);
2297 delete_insns_since (entry_last
);
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
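/* Worked example (illustration only): for SImode operands on a 32-bit target
   the library value is read back in DImode, twice as wide; the subreg at
   byte offset 0 holds the value delivered to TARG0 and the subreg at offset
   GET_MODE_SIZE (SImode) == 4 the one delivered to TARG1, matching the
   offsets selected above.  */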
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
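/* For instance (illustration only), expand_simple_unop (SImode, NEG, x,
   NULL_RTX, 0) behaves exactly like expand_unop (SImode, neg_optab, x,
   NULL_RTX, 0), since code_to_optab[NEG] is neg_optab.  */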
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */

static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
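/* Worked example (not from the original sources): clz on a QImode value,
   using only an SImode clz pattern, becomes

     (clz:SI (zero_extend:SI x)) - (32 - 8)

   e.g. x = 0x10 zero-extends to 0x00000010, whose SImode clz is 27, and
   27 - 24 == 3 is the correct QImode count.  */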
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
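/* For example (illustration only): the parity of 0x0B, which has three bits
   set, is computed as popcount (0x0B) & 1 == 3 & 1 == 1; any wider mode may
   be used for the popcount because zero-extension introduces no set bits.  */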
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
2460 /* Expand a floating point absolute value or negation operation via a
2461 logical operation on the sign bit. */
2464 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2465 rtx op0
, rtx target
)
2467 const struct real_format
*fmt
;
2468 int bitpos
, word
, nwords
, i
;
2469 enum machine_mode imode
;
2470 HOST_WIDE_INT hi
, lo
;
2473 /* The format has to have a simple sign bit. */
2474 fmt
= REAL_MODE_FORMAT (mode
);
2478 bitpos
= fmt
->signbit_rw
;
2482 /* Don't create negative zeros if the format doesn't support them. */
2483 if (code
== NEG
&& !fmt
->has_signed_zero
)
2486 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2488 imode
= int_mode_for_mode (mode
);
2489 if (imode
== BLKmode
)
2498 if (FLOAT_WORDS_BIG_ENDIAN
)
2499 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2501 word
= bitpos
/ BITS_PER_WORD
;
2502 bitpos
= bitpos
% BITS_PER_WORD
;
2503 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2506 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2509 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2513 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2519 if (target
== 0 || target
== op0
)
2520 target
= gen_reg_rtx (mode
);
2526 for (i
= 0; i
< nwords
; ++i
)
2528 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2529 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2533 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2535 immed_double_const (lo
, hi
, imode
),
2536 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2537 if (temp
!= targ_piece
)
2538 emit_move_insn (targ_piece
, temp
);
2541 emit_move_insn (targ_piece
, op0_piece
);
2544 insns
= get_insns ();
2547 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2548 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2552 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2553 gen_lowpart (imode
, op0
),
2554 immed_double_const (lo
, hi
, imode
),
2555 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2556 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2558 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2559 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
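/* Concrete illustration (assuming IEEE single precision, whose sign bit is
   bit 31): the masks built above turn ABS into an AND with 0x7fffffff and
   NEG into an XOR with 0x80000000 on the integer image of the value, so the
   sign bit is cleared or flipped without touching exponent or significand.  */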
2565 /* Generate code to perform an operation specified by UNOPTAB
2566 on operand OP0, with result having machine-mode MODE.
2568 UNSIGNEDP is for the case where we have to widen the operands
2569 to perform the operation. It says to use zero-extension.
2571 If TARGET is nonzero, the value
2572 is generated there, if it is convenient to do so.
2573 In all cases an rtx is returned for the locus of the value;
2574 this may or may not be TARGET. */
2577 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2580 enum mode_class
class;
2581 enum machine_mode wider_mode
;
2583 rtx last
= get_last_insn ();
2586 class = GET_MODE_CLASS (mode
);
2588 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2590 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2591 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2597 temp
= gen_reg_rtx (mode
);
2599 if (GET_MODE (xop0
) != VOIDmode
2600 && GET_MODE (xop0
) != mode0
)
2601 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2603 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2605 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2606 xop0
= copy_to_mode_reg (mode0
, xop0
);
2608 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
2609 temp
= gen_reg_rtx (mode
);
2611 pat
= GEN_FCN (icode
) (temp
, xop0
);
2614 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2615 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2617 delete_insns_since (last
);
2618 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2626 delete_insns_since (last
);
2629 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2631 /* Widening clz needs special treatment. */
2632 if (unoptab
== clz_optab
)
2634 temp
= widen_clz (mode
, op0
, target
);
2641 /* We can't widen a bswap. */
2642 if (unoptab
== bswap_optab
)
2645 if (CLASS_HAS_WIDER_MODES_P (class))
2646 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2647 wider_mode
!= VOIDmode
;
2648 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2650 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2654 /* For certain operations, we need not actually extend
2655 the narrow operand, as long as we will truncate the
2656 results to the same narrowness. */
2658 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2659 (unoptab
== neg_optab
2660 || unoptab
== one_cmpl_optab
)
2661 && class == MODE_INT
);
2663 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2668 if (class != MODE_INT
2669 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2670 GET_MODE_BITSIZE (wider_mode
)))
2673 target
= gen_reg_rtx (mode
);
2674 convert_move (target
, temp
, 0);
2678 return gen_lowpart (mode
, temp
);
2681 delete_insns_since (last
);
2685 /* These can be done a word at a time. */
2686 if (unoptab
== one_cmpl_optab
2687 && class == MODE_INT
2688 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2689 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2694 if (target
== 0 || target
== op0
)
2695 target
= gen_reg_rtx (mode
);
2699 /* Do the actual arithmetic. */
2700 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2702 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2703 rtx x
= expand_unop (word_mode
, unoptab
,
2704 operand_subword_force (op0
, i
, mode
),
2705 target_piece
, unsignedp
);
2707 if (target_piece
!= x
)
2708 emit_move_insn (target_piece
, x
);
2711 insns
= get_insns ();
2714 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2715 gen_rtx_fmt_e (unoptab
->code
, mode
,
2720 if (unoptab
->code
== NEG
)
2722 /* Try negating floating point values by flipping the sign bit. */
2723 if (SCALAR_FLOAT_MODE_P (mode
))
2725 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2730 /* If there is no negation pattern, and we have no negative zero,
2731 try subtracting from zero. */
2732 if (!HONOR_SIGNED_ZEROS (mode
))
2734 temp
= expand_binop (mode
, (unoptab
== negv_optab
2735 ? subv_optab
: sub_optab
),
2736 CONST0_RTX (mode
), op0
, target
,
2737 unsignedp
, OPTAB_DIRECT
);
2743 /* Try calculating parity (x) as popcount (x) % 2. */
2744 if (unoptab
== parity_optab
)
2746 temp
= expand_parity (mode
, op0
, target
);
2752 /* Now try a library call in this mode. */
2753 if (unoptab
->handlers
[(int) mode
].libfunc
)
2757 enum machine_mode outmode
= mode
;
2759 /* All of these functions return small values. Thus we choose to
2760 have them return something that isn't a double-word. */
2761 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2762 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2764 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2768 /* Pass 1 for NO_QUEUE so we don't lose any increments
2769 if the libcall is cse'd or moved. */
2770 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2771 NULL_RTX
, LCT_CONST
, outmode
,
2773 insns
= get_insns ();
2776 target
= gen_reg_rtx (outmode
);
2777 emit_libcall_block (insns
, target
, value
,
2778 gen_rtx_fmt_e (unoptab
->code
, outmode
, op0
));
2783 /* It can't be done in this mode. Can we do it in a wider mode? */
2785 if (CLASS_HAS_WIDER_MODES_P (class))
2787 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2788 wider_mode
!= VOIDmode
;
2789 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2791 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2792 != CODE_FOR_nothing
)
2793 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2797 /* For certain operations, we need not actually extend
2798 the narrow operand, as long as we will truncate the
2799 results to the same narrowness. */
2801 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2802 (unoptab
== neg_optab
2803 || unoptab
== one_cmpl_optab
)
2804 && class == MODE_INT
);
2806 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2809 /* If we are generating clz using wider mode, adjust the
2811 if (unoptab
== clz_optab
&& temp
!= 0)
2812 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2813 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2814 - GET_MODE_BITSIZE (mode
)),
2815 target
, true, OPTAB_DIRECT
);
2819 if (class != MODE_INT
)
2822 target
= gen_reg_rtx (mode
);
2823 convert_move (target
, temp
, 0);
2827 return gen_lowpart (mode
, temp
);
2830 delete_insns_since (last
);
2835 /* One final attempt at implementing negation via subtraction,
2836 this time allowing widening of the operand. */
2837 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2840 temp
= expand_binop (mode
,
2841 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2842 CONST0_RTX (mode
), op0
,
2843 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2851 /* Emit code to compute the absolute value of OP0, with result to
2852 TARGET if convenient. (TARGET may be 0.) The return value says
2853 where the result actually is to be found.
2855 MODE is the mode of the operand; the mode of the result is
2856 different but can be deduced from MODE.
2861 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2862 int result_unsignedp
)
2867 result_unsignedp
= 1;
2869 /* First try to do it with a special abs instruction. */
2870 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2875 /* For floating point modes, try clearing the sign bit. */
2876 if (SCALAR_FLOAT_MODE_P (mode
))
2878 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2883 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2884 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2885 && !HONOR_SIGNED_ZEROS (mode
))
2887 rtx last
= get_last_insn ();
2889 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2891 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2897 delete_insns_since (last
);
2900 /* If this machine has expensive jumps, we can do integer absolute
2901 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2902 where W is the width of MODE. */
2904 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2906 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2907 size_int (GET_MODE_BITSIZE (mode
) - 1),
2910 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2913 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2914 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
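/* Worked example of the shift/xor sequence above (illustrative, 32-bit
   SImode): for x = -5, extended = x >> 31 = -1 (all ones), and
   (x ^ -1) - (-1) == ~x + 1 == 5; for non-negative x the mask is 0 and x is
   returned unchanged.  */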
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;

  return target;
}
2967 /* A subroutine of expand_copysign, perform the copysign operation using the
2968 abs and neg primitives advertised to exist on the target. The assumption
2969 is that we have a split register file, and leaving op0 in fp registers,
2970 and not playing with subregs so much, will help the register allocator. */
2973 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2974 int bitpos
, bool op0_is_abs
)
2976 enum machine_mode imode
;
2977 HOST_WIDE_INT hi
, lo
;
2986 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2993 if (target
== NULL_RTX
)
2994 target
= copy_to_reg (op0
);
2996 emit_move_insn (target
, op0
);
2999 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3001 imode
= int_mode_for_mode (mode
);
3002 if (imode
== BLKmode
)
3004 op1
= gen_lowpart (imode
, op1
);
3009 if (FLOAT_WORDS_BIG_ENDIAN
)
3010 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3012 word
= bitpos
/ BITS_PER_WORD
;
3013 bitpos
= bitpos
% BITS_PER_WORD
;
3014 op1
= operand_subword_force (op1
, word
, mode
);
3017 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3020 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3024 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3028 op1
= expand_binop (imode
, and_optab
, op1
,
3029 immed_double_const (lo
, hi
, imode
),
3030 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3032 label
= gen_label_rtx ();
3033 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3035 if (GET_CODE (op0
) == CONST_DOUBLE
)
3036 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3038 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3040 emit_move_insn (target
, op0
);
3048 /* A subroutine of expand_copysign, perform the entire copysign operation
3049 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3050 is true if op0 is known to have its sign bit clear. */
3053 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3054 int bitpos
, bool op0_is_abs
)
3056 enum machine_mode imode
;
3057 HOST_WIDE_INT hi
, lo
;
3058 int word
, nwords
, i
;
3061 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3063 imode
= int_mode_for_mode (mode
);
3064 if (imode
== BLKmode
)
3073 if (FLOAT_WORDS_BIG_ENDIAN
)
3074 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3076 word
= bitpos
/ BITS_PER_WORD
;
3077 bitpos
= bitpos
% BITS_PER_WORD
;
3078 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3081 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3084 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3088 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3092 if (target
== 0 || target
== op0
|| target
== op1
)
3093 target
= gen_reg_rtx (mode
);
3099 for (i
= 0; i
< nwords
; ++i
)
3101 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3102 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3107 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
3108 immed_double_const (~lo
, ~hi
, imode
),
3109 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3111 op1
= expand_binop (imode
, and_optab
,
3112 operand_subword_force (op1
, i
, mode
),
3113 immed_double_const (lo
, hi
, imode
),
3114 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3116 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3117 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3118 if (temp
!= targ_piece
)
3119 emit_move_insn (targ_piece
, temp
);
3122 emit_move_insn (targ_piece
, op0_piece
);
3125 insns
= get_insns ();
3128 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
3132 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3133 immed_double_const (lo
, hi
, imode
),
3134 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3136 op0
= gen_lowpart (imode
, op0
);
3138 op0
= expand_binop (imode
, and_optab
, op0
,
3139 immed_double_const (~lo
, ~hi
, imode
),
3140 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3142 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3143 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3144 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
	      && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
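/* For reference (C99 semantics, illustration only): copysign (3.5, -0.0)
   is -3.5 and copysign (-3.5, 2.0) is 3.5.  The bit-level fallback
   (expand_copysign_bit, above) realizes this as
   (op0 & ~signmask) | (op1 & signmask) on the integer image of the
   operands.  */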
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
3234 struct no_conflict_data
3236 rtx target
, first
, insn
;
3240 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3241 Set P->must_stay if the currently examined clobber / store has to stay
3242 in the list of insns that constitute the actual no_conflict block /
3245 no_conflict_move_test (rtx dest
, rtx set
, void *p0
)
3247 struct no_conflict_data
*p
= p0
;
3249 /* If this inns directly contributes to setting the target, it must stay. */
3250 if (reg_overlap_mentioned_p (p
->target
, dest
))
3251 p
->must_stay
= true;
3252 /* If we haven't committed to keeping any other insns in the list yet,
3253 there is nothing more to check. */
3254 else if (p
->insn
== p
->first
)
3256 /* If this insn sets / clobbers a register that feeds one of the insns
3257 already in the list, this insn has to stay too. */
3258 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3259 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3260 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3261 /* Likewise if this insn depends on a register set by a previous
3262 insn in the list, or if it sets a result (presumably a hard
3263 register) that is set or clobbered by a previous insn.
3264 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3265 SET_DEST perform the former check on the address, and the latter
3266 check on the MEM. */
3267 || (GET_CODE (set
) == SET
3268 && (modified_in_p (SET_SRC (set
), p
->first
)
3269 || modified_in_p (SET_DEST (set
), p
->first
)
3270 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3271 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3272 p
->must_stay
= true;
3275 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3276 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3277 is possible to do so. */
3280 maybe_encapsulate_block (rtx first
, rtx last
, rtx equiv
)
3282 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3284 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3285 encapsulated region would not be in one basic block, i.e. when
3286 there is a control_flow_insn_p insn between FIRST and LAST. */
3287 bool attach_libcall_retval_notes
= true;
3288 rtx insn
, next
= NEXT_INSN (last
);
3290 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3291 if (control_flow_insn_p (insn
))
3293 attach_libcall_retval_notes
= false;
3297 if (attach_libcall_retval_notes
)
3299 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3301 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3307 /* Emit code to perform a series of operations on a multi-word quantity, one
3310 Such a block is preceded by a CLOBBER of the output, consists of multiple
3311 insns, each setting one word of the output, and followed by a SET copying
3312 the output to itself.
3314 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3315 note indicating that it doesn't conflict with the (also multi-word)
3316 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3319 INSNS is a block of code generated to perform the operation, not including
3320 the CLOBBER and final copy. All insns that compute intermediate values
3321 are first emitted, followed by the block as described above.
3323 TARGET, OP0, and OP1 are the output and inputs of the operations,
3324 respectively. OP1 may be zero for a unary operation.
3326 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3329 If TARGET is not a register, INSNS is simply emitted with no special
3330 processing. Likewise if anything in INSNS is not an INSN or if
3331 there is a libcall block inside INSNS.
3333 The final insn emitted is returned. */
3336 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3338 rtx prev
, next
, first
, last
, insn
;
3340 if (!REG_P (target
) || reload_in_progress
)
3341 return emit_insn (insns
);
3343 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3344 if (!NONJUMP_INSN_P (insn
)
3345 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3346 return emit_insn (insns
);
3348 /* First emit all insns that do not store into words of the output and remove
3349 these from the list. */
3350 for (insn
= insns
; insn
; insn
= next
)
3353 struct no_conflict_data data
;
3355 next
= NEXT_INSN (insn
);
3357 /* Some ports (cris) create a libcall regions at their own. We must
3358 avoid any potential nesting of LIBCALLs. */
3359 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3360 remove_note (insn
, note
);
3361 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3362 remove_note (insn
, note
);
3364 data
.target
= target
;
3368 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3369 if (! data
.must_stay
)
3371 if (PREV_INSN (insn
))
3372 NEXT_INSN (PREV_INSN (insn
)) = next
;
3377 PREV_INSN (next
) = PREV_INSN (insn
);
3383 prev
= get_last_insn ();
3385 /* Now write the CLOBBER of the output, followed by the setting of each
3386 of the words, followed by the final copy. */
3387 if (target
!= op0
&& target
!= op1
)
3388 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3390 for (insn
= insns
; insn
; insn
= next
)
3392 next
= NEXT_INSN (insn
);
3395 if (op1
&& REG_P (op1
))
3396 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3399 if (op0
&& REG_P (op0
))
3400 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3404 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3405 != CODE_FOR_nothing
)
3407 last
= emit_move_insn (target
, target
);
3409 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3413 last
= get_last_insn ();
3415 /* Remove any existing REG_EQUAL note from "last", or else it will
3416 be mistaken for a note referring to the full contents of the
3417 alleged libcall value when found together with the REG_RETVAL
3418 note added below. An existing note can come from an insn
3419 expansion at "last". */
3420 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3424 first
= get_insns ();
3426 first
= NEXT_INSN (prev
);
3428 maybe_encapsulate_block (first
, last
, equiv
);
3433 /* Emit code to make a call to a constant function or a library call.
3435 INSNS is a list containing all insns emitted in the call.
3436 These insns leave the result in RESULT. Our block is to copy RESULT
3437 to TARGET, which is logically equivalent to EQUIV.
3439 We first emit any insns that set a pseudo on the assumption that these are
3440 loading constants into registers; doing so allows them to be safely cse'ed
3441 between blocks. Then we emit all the other insns in the block, followed by
3442 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3443 note with an operand of EQUIV.
3445 Moving assignments to pseudos outside of the block is done to improve
3446 the generated code, but is not required to generate correct code,
3447 hence being unable to move an assignment is not grounds for not making
3448 a libcall block. There are two reasons why it is safe to leave these
3449 insns inside the block: First, we know that these pseudos cannot be
3450 used in generated RTL outside the block since they are created for
3451 temporary purposes within the block. Second, CSE will not record the
3452 values of anything set inside a libcall block, so we know they must
3453 be dead at the end of the block.
3455 Except for the first group of insns (the ones setting pseudos), the
3456 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3459 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3461 rtx final_dest
= target
;
3462 rtx prev
, next
, first
, last
, insn
;
3464 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3465 into a MEM later. Protect the libcall block from this change. */
3466 if (! REG_P (target
) || REG_USERVAR_P (target
))
3467 target
= gen_reg_rtx (GET_MODE (target
));
3469 /* If we're using non-call exceptions, a libcall corresponding to an
3470 operation that may trap may also trap. */
3471 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3473 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3476 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3478 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3479 remove_note (insn
, note
);
3483 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3484 reg note to indicate that this call cannot throw or execute a nonlocal
3485 goto (unless there is already a REG_EH_REGION note, in which case
3487 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3490 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3493 XEXP (note
, 0) = constm1_rtx
;
3495 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3499 /* First emit all insns that set pseudos. Remove them from the list as
3500 we go. Avoid insns that set pseudos which were referenced in previous
3501 insns. These can be generated by move_by_pieces, for example,
3502 to update an address. Similarly, avoid insns that reference things
3503 set in previous insns. */
3505 for (insn
= insns
; insn
; insn
= next
)
3507 rtx set
= single_set (insn
);
3510 /* Some ports (cris) create a libcall regions at their own. We must
3511 avoid any potential nesting of LIBCALLs. */
3512 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3513 remove_note (insn
, note
);
3514 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3515 remove_note (insn
, note
);
3517 next
= NEXT_INSN (insn
);
3519 if (set
!= 0 && REG_P (SET_DEST (set
))
3520 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3522 struct no_conflict_data data
;
3524 data
.target
= const0_rtx
;
3528 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3529 if (! data
.must_stay
)
3531 if (PREV_INSN (insn
))
3532 NEXT_INSN (PREV_INSN (insn
)) = next
;
3537 PREV_INSN (next
) = PREV_INSN (insn
);
3543 /* Some ports use a loop to copy large arguments onto the stack.
3544 Don't move anything outside such a loop. */
3549 prev
= get_last_insn ();
3551 /* Write the remaining insns followed by the final copy. */
3553 for (insn
= insns
; insn
; insn
= next
)
3555 next
= NEXT_INSN (insn
);
3560 last
= emit_move_insn (target
, result
);
3561 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3562 != CODE_FOR_nothing
)
3563 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3566 /* Remove any existing REG_EQUAL note from "last", or else it will
3567 be mistaken for a note referring to the full contents of the
3568 libcall value when found together with the REG_RETVAL note added
3569 below. An existing note can come from an insn expansion at
3571 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3574 if (final_dest
!= target
)
3575 emit_move_insn (final_dest
, target
);
3578 first
= get_insns ();
3580 first
= NEXT_INSN (prev
);
3582 maybe_encapsulate_block (first
, last
, equiv
);
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
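/* Usage sketch (illustrative): before emitting a DImode conditional branch a
   caller may test

     if (can_compare_p (GT, DImode, ccp_jump))
       ...

   and fall back to a word-by-word comparison or a library call when this
   returns zero; the loop above already accounts for doing the comparison in
   any wider mode.  */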
3625 /* This function is called when we are going to emit a compare instruction that
3626 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3628 *PMODE is the mode of the inputs (in case they are const_int).
3629 *PUNSIGNEDP nonzero says that the operands are unsigned;
3630 this matters if they need to be widened.
3632 If they have mode BLKmode, then SIZE specifies the size of both operands.
3634 This function performs all the setup necessary so that the caller only has
3635 to emit a single comparison insn. This setup can involve doing a BLKmode
3636 comparison or emitting a library call to perform the comparison if no insn
3637 is available to handle it.
3638 The values which are passed in through pointers can be modified; the caller
3639 should perform the comparison on the modified values. Constant
3640 comparisons must have already been folded. */
3643 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3644 enum machine_mode
*pmode
, int *punsignedp
,
3645 enum can_compare_purpose purpose
)
3647 enum machine_mode mode
= *pmode
;
3648 rtx x
= *px
, y
= *py
;
3649 int unsignedp
= *punsignedp
;
3651 /* If we are inside an appropriately-short loop and we are optimizing,
3652 force expensive constants into a register. */
3653 if (CONSTANT_P (x
) && optimize
3654 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3655 x
= force_reg (mode
, x
);
3657 if (CONSTANT_P (y
) && optimize
3658 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3659 y
= force_reg (mode
, y
);
3662 /* Make sure if we have a canonical comparison. The RTL
3663 documentation states that canonical comparisons are required only
3664 for targets which have cc0. */
3665 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3668 /* Don't let both operands fail to indicate the mode. */
3669 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3670 x
= force_reg (mode
, x
);
3672 /* Handle all BLKmode compares. */
3674 if (mode
== BLKmode
)
3676 enum machine_mode cmp_mode
, result_mode
;
3677 enum insn_code cmp_code
;
3682 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3686 /* Try to use a memory block compare insn - either cmpstr
3687 or cmpmem will do. */
3688 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3689 cmp_mode
!= VOIDmode
;
3690 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3692 cmp_code
= cmpmem_optab
[cmp_mode
];
3693 if (cmp_code
== CODE_FOR_nothing
)
3694 cmp_code
= cmpstr_optab
[cmp_mode
];
3695 if (cmp_code
== CODE_FOR_nothing
)
3696 cmp_code
= cmpstrn_optab
[cmp_mode
];
3697 if (cmp_code
== CODE_FOR_nothing
)
3700 /* Must make sure the size fits the insn's mode. */
3701 if ((GET_CODE (size
) == CONST_INT
3702 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3703 || (GET_MODE_BITSIZE (GET_MODE (size
))
3704 > GET_MODE_BITSIZE (cmp_mode
)))
3707 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3708 result
= gen_reg_rtx (result_mode
);
3709 size
= convert_to_mode (cmp_mode
, size
, 1);
3710 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3714 *pmode
= result_mode
;
3718 /* Otherwise call a library function, memcmp. */
3719 libfunc
= memcmp_libfunc
;
3720 length_type
= sizetype
;
3721 result_mode
= TYPE_MODE (integer_type_node
);
3722 cmp_mode
= TYPE_MODE (length_type
);
3723 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3724 TYPE_UNSIGNED (length_type
));
3726 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3733 *pmode
= result_mode
;
3737 /* Don't allow operands to the compare to trap, as that can put the
3738 compare and branch in different basic blocks. */
3739 if (flag_non_call_exceptions
)
3742 x
= force_reg (mode
, x
);
3744 y
= force_reg (mode
, y
);
3749 if (can_compare_p (*pcomparison
, mode
, purpose
))
3752 /* Handle a lib call just for the mode we are using. */
3754 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& !SCALAR_FLOAT_MODE_P (mode
))
3756 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3759 /* If we want unsigned, and this mode has a distinct unsigned
3760 comparison routine, use that. */
3761 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3762 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3764 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3765 word_mode
, 2, x
, mode
, y
, mode
);
3767 /* There are two kinds of comparison routines. Biased routines
3768 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3769 of gcc expect that the comparison operation is equivalent
3770 to the modified comparison. For signed comparisons compare the
3771 result against 1 in the biased case, and zero in the unbiased
3772 case. For unsigned comparisons always compare against 1 after
3773 biasing the unbiased result by adding 1. This gives us a way to
3779 if (!TARGET_LIB_INT_CMP_BIASED
)
3782 *px
= plus_constant (result
, 1);
3789 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3790 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
3816 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3817 we can do the comparison.
3818 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3819 be NULL_RTX which indicates that only a comparison is to be generated. */
3822 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3823 enum rtx_code comparison
, int unsignedp
, rtx label
)
3825 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3826 enum mode_class
class = GET_MODE_CLASS (mode
);
3827 enum machine_mode wider_mode
= mode
;
3829 /* Try combined insns first. */
3832 enum insn_code icode
;
3833 PUT_MODE (test
, wider_mode
);
3837 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3839 if (icode
!= CODE_FOR_nothing
3840 && insn_data
[icode
].operand
[0].predicate (test
, wider_mode
))
3842 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3843 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3844 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3849 /* Handle some compares against zero. */
3850 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3851 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3853 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3854 emit_insn (GEN_FCN (icode
) (x
));
3856 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3860 /* Handle compares for which there is a directly suitable insn. */
3862 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3863 if (icode
!= CODE_FOR_nothing
)
3865 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3866 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3867 emit_insn (GEN_FCN (icode
) (x
, y
));
3869 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3873 if (!CLASS_HAS_WIDER_MODES_P (class))
3876 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3878 while (wider_mode
!= VOIDmode
);
3883 /* Generate code to compare X with Y so that the condition codes are
3884 set and to jump to LABEL if the condition is true. If X is a
3885 constant and Y is not a constant, then the comparison is swapped to
3886 ensure that the comparison RTL has the canonical form.
3888 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3889 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3890 the proper branch condition code.
3892 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3894 MODE is the mode of the inputs (in case they are const_int).
3896 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3897 be passed unchanged to emit_cmp_insn, then potentially converted into an
3898 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3901 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3902 enum machine_mode mode
, int unsignedp
, rtx label
)
3904 rtx op0
= x
, op1
= y
;
3906 /* Swap operands and condition to ensure canonical RTL. */
3907 if (swap_commutative_operands_p (x
, y
))
3909 /* If we're not emitting a branch, this means some caller
3914 comparison
= swap_condition (comparison
);
3918 /* If OP0 is still a constant, then both X and Y must be constants.
3919 Force X into a register to create canonical RTL. */
3920 if (CONSTANT_P (op0
))
3921 op0
= force_reg (mode
, op0
);
3925 comparison
= unsigned_condition (comparison
);
3927 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3929 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
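/* Example call (illustration only):

     emit_cmp_and_jump_insns (x, y, GT, NULL_RTX, SImode, 0, label);

   emits a signed SImode comparison and a branch to LABEL taken when x > y;
   with UNSIGNEDP nonzero the condition is first converted by
   unsigned_condition, as done above.  */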
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
        break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
        {
          rtx tmp;
          tmp = x; x = y; y = tmp;
          comparison = swapped;
          break;
        }

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
          && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        {
          rtx true_rtx, false_rtx;

          switch (comparison)
            {
            case EQ:
              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;
              break;

            case NE:
              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;
              break;

            case GT:
              true_rtx = const1_rtx;
              false_rtx = const0_rtx;
              break;

            case GE:
              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;
              break;

            case LT:
              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;
              break;

            case LE:
              true_rtx = const0_rtx;
              false_rtx = const1_rtx;
              break;

            default:
              gcc_unreachable ();
            }
          equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                        equiv, true_rtx, false_rtx);
        }
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}
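/* For example, a DFmode GT comparison whose optab entry points at the usual
   libgcc helper (e.g. "__gtdf2") is rewritten here into "call that helper,
   then test its word_mode result against zero"; the exact helper names depend
   on how the comparison optab libfuncs were initialized for the target.  */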
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  subtarget = target;

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}

/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  For binary
     floating point modes, convert as signed, then conditionally adjust
     the result.  */
  if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is not needed.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for other input, overflow happens and the result is undefined).
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        icode = tab->handlers[imode][fmode].insn_code;
        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from, UNKNOWN);
            if (target != to)
              convert_move (to, target, 0);
            return true;
          }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
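/* For instance, have_insn_for (PLUS, SImode) reports whether the target has a
   named SImode add pattern: PLUS maps to add_optab through the code_to_optab
   table that init_optab fills in below.  */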
/* Create a blank optab.  */
static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }

  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */
static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */
static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */
static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                              p - libfunc_name));
    }
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
  init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
                 opname, suffix);
}
5103 /* Initialize the libfunc fields of an entire group of entries of an
5104 inter-mode-class conversion optab. The string formation rules are
5105 similar to the ones for init_libfuncs, above, but instead of having
5106 a mode name and an operand count these functions have two mode names
5107 and no operand count. */
5109 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5110 enum mode_class from_class
,
5111 enum mode_class to_class
)
5113 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
5114 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
5115 size_t opname_len
= strlen (opname
);
5116 size_t max_mname_len
= 0;
5118 enum machine_mode fmode
, tmode
;
5119 const char *fname
, *tname
;
5121 char *libfunc_name
, *suffix
;
5124 for (fmode
= first_from_mode
;
5126 fmode
= GET_MODE_WIDER_MODE (fmode
))
5127 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
5129 for (tmode
= first_to_mode
;
5131 tmode
= GET_MODE_WIDER_MODE (tmode
))
5132 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
5134 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5135 libfunc_name
[0] = '_';
5136 libfunc_name
[1] = '_';
5137 memcpy (&libfunc_name
[2], opname
, opname_len
);
5138 suffix
= libfunc_name
+ opname_len
+ 2;
5140 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
5141 fmode
= GET_MODE_WIDER_MODE (fmode
))
5142 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
5143 tmode
= GET_MODE_WIDER_MODE (tmode
))
5145 fname
= GET_MODE_NAME (fmode
);
5146 tname
= GET_MODE_NAME (tmode
);
5149 for (q
= fname
; *q
; p
++, q
++)
5151 for (q
= tname
; *q
; p
++, q
++)
5156 tab
->handlers
[tmode
][fmode
].libfunc
5157 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5162 /* Initialize the libfunc fields of an entire group of entries of an
5163 intra-mode-class conversion optab. The string formation rules are
5164 similar to the ones for init_libfunc, above. WIDENING says whether
5165 the optab goes from narrow to wide modes or vice versa. These functions
5166 have two mode names _and_ an operand count. */
5168 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5169 enum mode_class
class, bool widening
)
5171 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
5172 size_t opname_len
= strlen (opname
);
5173 size_t max_mname_len
= 0;
5175 enum machine_mode nmode
, wmode
;
5176 const char *nname
, *wname
;
5178 char *libfunc_name
, *suffix
;
5181 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5182 nmode
= GET_MODE_WIDER_MODE (nmode
))
5183 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
5185 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5186 libfunc_name
[0] = '_';
5187 libfunc_name
[1] = '_';
5188 memcpy (&libfunc_name
[2], opname
, opname_len
);
5189 suffix
= libfunc_name
+ opname_len
+ 2;
5191 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5192 nmode
= GET_MODE_WIDER_MODE (nmode
))
5193 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
5194 wmode
= GET_MODE_WIDER_MODE (wmode
))
5196 nname
= GET_MODE_NAME (nmode
);
5197 wname
= GET_MODE_NAME (wmode
);
5200 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
5202 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
5208 tab
->handlers
[widening
? wmode
: nmode
]
5209 [widening
? nmode
: wmode
].libfunc
5210 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */
void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */
void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
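/* Targets typically call these from their targetm.init_libfuncs hook (invoked
   at the end of init_optabs below).  As a hypothetical example,
   set_optab_libfunc (smod_optab, SImode, "__custom_modsi3") would redirect the
   SImode modulus libcall to that routine, and passing 0 removes the libcall
   entry altogether.  */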
5264 /* Call this once to initialize the contents of the optabs
5265 appropriately for the current target machine. */
5272 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5274 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5275 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5277 #ifdef HAVE_conditional_move
5278 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5279 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5282 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5284 vcond_gen_code
[i
] = CODE_FOR_nothing
;
5285 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
5288 add_optab
= init_optab (PLUS
);
5289 addv_optab
= init_optabv (PLUS
);
5290 sub_optab
= init_optab (MINUS
);
5291 subv_optab
= init_optabv (MINUS
);
5292 smul_optab
= init_optab (MULT
);
5293 smulv_optab
= init_optabv (MULT
);
5294 smul_highpart_optab
= init_optab (UNKNOWN
);
5295 umul_highpart_optab
= init_optab (UNKNOWN
);
5296 smul_widen_optab
= init_optab (UNKNOWN
);
5297 umul_widen_optab
= init_optab (UNKNOWN
);
5298 usmul_widen_optab
= init_optab (UNKNOWN
);
5299 sdiv_optab
= init_optab (DIV
);
5300 sdivv_optab
= init_optabv (DIV
);
5301 sdivmod_optab
= init_optab (UNKNOWN
);
5302 udiv_optab
= init_optab (UDIV
);
5303 udivmod_optab
= init_optab (UNKNOWN
);
5304 smod_optab
= init_optab (MOD
);
5305 umod_optab
= init_optab (UMOD
);
5306 fmod_optab
= init_optab (UNKNOWN
);
5307 remainder_optab
= init_optab (UNKNOWN
);
5308 ftrunc_optab
= init_optab (UNKNOWN
);
5309 and_optab
= init_optab (AND
);
5310 ior_optab
= init_optab (IOR
);
5311 xor_optab
= init_optab (XOR
);
5312 ashl_optab
= init_optab (ASHIFT
);
5313 ashr_optab
= init_optab (ASHIFTRT
);
5314 lshr_optab
= init_optab (LSHIFTRT
);
5315 rotl_optab
= init_optab (ROTATE
);
5316 rotr_optab
= init_optab (ROTATERT
);
5317 smin_optab
= init_optab (SMIN
);
5318 smax_optab
= init_optab (SMAX
);
5319 umin_optab
= init_optab (UMIN
);
5320 umax_optab
= init_optab (UMAX
);
5321 pow_optab
= init_optab (UNKNOWN
);
5322 atan2_optab
= init_optab (UNKNOWN
);
5324 /* These three have codes assigned exclusively for the sake of
5326 mov_optab
= init_optab (SET
);
5327 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5328 cmp_optab
= init_optab (COMPARE
);
5330 ucmp_optab
= init_optab (UNKNOWN
);
5331 tst_optab
= init_optab (UNKNOWN
);
5333 eq_optab
= init_optab (EQ
);
5334 ne_optab
= init_optab (NE
);
5335 gt_optab
= init_optab (GT
);
5336 ge_optab
= init_optab (GE
);
5337 lt_optab
= init_optab (LT
);
5338 le_optab
= init_optab (LE
);
5339 unord_optab
= init_optab (UNORDERED
);
5341 neg_optab
= init_optab (NEG
);
5342 negv_optab
= init_optabv (NEG
);
5343 abs_optab
= init_optab (ABS
);
5344 absv_optab
= init_optabv (ABS
);
5345 addcc_optab
= init_optab (UNKNOWN
);
5346 one_cmpl_optab
= init_optab (NOT
);
5347 bswap_optab
= init_optab (BSWAP
);
5348 ffs_optab
= init_optab (FFS
);
5349 clz_optab
= init_optab (CLZ
);
5350 ctz_optab
= init_optab (CTZ
);
5351 popcount_optab
= init_optab (POPCOUNT
);
5352 parity_optab
= init_optab (PARITY
);
5353 sqrt_optab
= init_optab (SQRT
);
5354 floor_optab
= init_optab (UNKNOWN
);
5355 ceil_optab
= init_optab (UNKNOWN
);
5356 round_optab
= init_optab (UNKNOWN
);
5357 btrunc_optab
= init_optab (UNKNOWN
);
5358 nearbyint_optab
= init_optab (UNKNOWN
);
5359 rint_optab
= init_optab (UNKNOWN
);
5360 sincos_optab
= init_optab (UNKNOWN
);
5361 sin_optab
= init_optab (UNKNOWN
);
5362 asin_optab
= init_optab (UNKNOWN
);
5363 cos_optab
= init_optab (UNKNOWN
);
5364 acos_optab
= init_optab (UNKNOWN
);
5365 exp_optab
= init_optab (UNKNOWN
);
5366 exp10_optab
= init_optab (UNKNOWN
);
5367 exp2_optab
= init_optab (UNKNOWN
);
5368 expm1_optab
= init_optab (UNKNOWN
);
5369 ldexp_optab
= init_optab (UNKNOWN
);
5370 logb_optab
= init_optab (UNKNOWN
);
5371 ilogb_optab
= init_optab (UNKNOWN
);
5372 log_optab
= init_optab (UNKNOWN
);
5373 log10_optab
= init_optab (UNKNOWN
);
5374 log2_optab
= init_optab (UNKNOWN
);
5375 log1p_optab
= init_optab (UNKNOWN
);
5376 tan_optab
= init_optab (UNKNOWN
);
5377 atan_optab
= init_optab (UNKNOWN
);
5378 copysign_optab
= init_optab (UNKNOWN
);
5380 strlen_optab
= init_optab (UNKNOWN
);
5381 cbranch_optab
= init_optab (UNKNOWN
);
5382 cmov_optab
= init_optab (UNKNOWN
);
5383 cstore_optab
= init_optab (UNKNOWN
);
5384 push_optab
= init_optab (UNKNOWN
);
5386 reduc_smax_optab
= init_optab (UNKNOWN
);
5387 reduc_umax_optab
= init_optab (UNKNOWN
);
5388 reduc_smin_optab
= init_optab (UNKNOWN
);
5389 reduc_umin_optab
= init_optab (UNKNOWN
);
5390 reduc_splus_optab
= init_optab (UNKNOWN
);
5391 reduc_uplus_optab
= init_optab (UNKNOWN
);
5393 ssum_widen_optab
= init_optab (UNKNOWN
);
5394 usum_widen_optab
= init_optab (UNKNOWN
);
5395 sdot_prod_optab
= init_optab (UNKNOWN
);
5396 udot_prod_optab
= init_optab (UNKNOWN
);
5398 vec_extract_optab
= init_optab (UNKNOWN
);
5399 vec_extract_even_optab
= init_optab (UNKNOWN
);
5400 vec_extract_odd_optab
= init_optab (UNKNOWN
);
5401 vec_interleave_high_optab
= init_optab (UNKNOWN
);
5402 vec_interleave_low_optab
= init_optab (UNKNOWN
);
5403 vec_set_optab
= init_optab (UNKNOWN
);
5404 vec_init_optab
= init_optab (UNKNOWN
);
5405 vec_shl_optab
= init_optab (UNKNOWN
);
5406 vec_shr_optab
= init_optab (UNKNOWN
);
5407 vec_realign_load_optab
= init_optab (UNKNOWN
);
5408 movmisalign_optab
= init_optab (UNKNOWN
);
5409 vec_widen_umult_hi_optab
= init_optab (UNKNOWN
);
5410 vec_widen_umult_lo_optab
= init_optab (UNKNOWN
);
5411 vec_widen_smult_hi_optab
= init_optab (UNKNOWN
);
5412 vec_widen_smult_lo_optab
= init_optab (UNKNOWN
);
5413 vec_unpacks_hi_optab
= init_optab (UNKNOWN
);
5414 vec_unpacks_lo_optab
= init_optab (UNKNOWN
);
5415 vec_unpacku_hi_optab
= init_optab (UNKNOWN
);
5416 vec_unpacku_lo_optab
= init_optab (UNKNOWN
);
5417 vec_pack_mod_optab
= init_optab (UNKNOWN
);
5418 vec_pack_usat_optab
= init_optab (UNKNOWN
);
5419 vec_pack_ssat_optab
= init_optab (UNKNOWN
);
5421 powi_optab
= init_optab (UNKNOWN
);
5424 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5425 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5426 trunc_optab
= init_convert_optab (TRUNCATE
);
5427 sfix_optab
= init_convert_optab (FIX
);
5428 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5429 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5430 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5431 sfloat_optab
= init_convert_optab (FLOAT
);
5432 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5433 lrint_optab
= init_convert_optab (UNKNOWN
);
5434 lround_optab
= init_convert_optab (UNKNOWN
);
5435 lfloor_optab
= init_convert_optab (UNKNOWN
);
5436 lceil_optab
= init_convert_optab (UNKNOWN
);
5438 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5440 movmem_optab
[i
] = CODE_FOR_nothing
;
5441 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5442 cmpstrn_optab
[i
] = CODE_FOR_nothing
;
5443 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5444 setmem_optab
[i
] = CODE_FOR_nothing
;
5446 sync_add_optab
[i
] = CODE_FOR_nothing
;
5447 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5448 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5449 sync_and_optab
[i
] = CODE_FOR_nothing
;
5450 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5451 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5452 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5453 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5454 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5455 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5456 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5457 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5458 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5459 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5460 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5461 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5462 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5463 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5464 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5465 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5466 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5467 sync_lock_release
[i
] = CODE_FOR_nothing
;
5469 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5472 /* Fill in the optabs with the insns we support. */
5475 /* Initialize the optabs with the names of the library functions. */
5476 init_integral_libfuncs (add_optab
, "add", '3');
5477 init_floating_libfuncs (add_optab
, "add", '3');
5478 init_integral_libfuncs (addv_optab
, "addv", '3');
5479 init_floating_libfuncs (addv_optab
, "add", '3');
5480 init_integral_libfuncs (sub_optab
, "sub", '3');
5481 init_floating_libfuncs (sub_optab
, "sub", '3');
5482 init_integral_libfuncs (subv_optab
, "subv", '3');
5483 init_floating_libfuncs (subv_optab
, "sub", '3');
5484 init_integral_libfuncs (smul_optab
, "mul", '3');
5485 init_floating_libfuncs (smul_optab
, "mul", '3');
5486 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5487 init_floating_libfuncs (smulv_optab
, "mul", '3');
5488 init_integral_libfuncs (sdiv_optab
, "div", '3');
5489 init_floating_libfuncs (sdiv_optab
, "div", '3');
5490 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5491 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5492 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5493 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5494 init_integral_libfuncs (smod_optab
, "mod", '3');
5495 init_integral_libfuncs (umod_optab
, "umod", '3');
5496 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5497 init_integral_libfuncs (and_optab
, "and", '3');
5498 init_integral_libfuncs (ior_optab
, "ior", '3');
5499 init_integral_libfuncs (xor_optab
, "xor", '3');
5500 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5501 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5502 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5503 init_integral_libfuncs (smin_optab
, "min", '3');
5504 init_floating_libfuncs (smin_optab
, "min", '3');
5505 init_integral_libfuncs (smax_optab
, "max", '3');
5506 init_floating_libfuncs (smax_optab
, "max", '3');
5507 init_integral_libfuncs (umin_optab
, "umin", '3');
5508 init_integral_libfuncs (umax_optab
, "umax", '3');
5509 init_integral_libfuncs (neg_optab
, "neg", '2');
5510 init_floating_libfuncs (neg_optab
, "neg", '2');
5511 init_integral_libfuncs (negv_optab
, "negv", '2');
5512 init_floating_libfuncs (negv_optab
, "neg", '2');
5513 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5514 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5515 init_integral_libfuncs (clz_optab
, "clz", '2');
5516 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5517 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5518 init_integral_libfuncs (parity_optab
, "parity", '2');
5520 /* Comparison libcalls for integers MUST come in pairs,
5522 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5523 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5524 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5526 /* EQ etc are floating point only. */
5527 init_floating_libfuncs (eq_optab
, "eq", '2');
5528 init_floating_libfuncs (ne_optab
, "ne", '2');
5529 init_floating_libfuncs (gt_optab
, "gt", '2');
5530 init_floating_libfuncs (ge_optab
, "ge", '2');
5531 init_floating_libfuncs (lt_optab
, "lt", '2');
5532 init_floating_libfuncs (le_optab
, "le", '2');
5533 init_floating_libfuncs (unord_optab
, "unord", '2');
5535 init_floating_libfuncs (powi_optab
, "powi", '2');
5538 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5539 MODE_INT
, MODE_FLOAT
);
5540 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5541 MODE_INT
, MODE_DECIMAL_FLOAT
);
5542 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5543 MODE_INT
, MODE_FLOAT
);
5544 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5545 MODE_INT
, MODE_DECIMAL_FLOAT
);
5546 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5547 MODE_FLOAT
, MODE_INT
);
5548 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5549 MODE_DECIMAL_FLOAT
, MODE_INT
);
5550 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5551 MODE_FLOAT
, MODE_INT
);
5552 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5553 MODE_DECIMAL_FLOAT
, MODE_INT
);
5554 init_interclass_conv_libfuncs (ufloat_optab
, "floatuns",
5555 MODE_INT
, MODE_DECIMAL_FLOAT
);
5556 init_interclass_conv_libfuncs (lrint_optab
, "lrint",
5557 MODE_INT
, MODE_FLOAT
);
5558 init_interclass_conv_libfuncs (lround_optab
, "lround",
5559 MODE_INT
, MODE_FLOAT
);
5560 init_interclass_conv_libfuncs (lfloor_optab
, "lfloor",
5561 MODE_INT
, MODE_FLOAT
);
5562 init_interclass_conv_libfuncs (lceil_optab
, "lceil",
5563 MODE_INT
, MODE_FLOAT
);
5565 /* sext_optab is also used for FLOAT_EXTEND. */
5566 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5567 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, true);
5568 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5569 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5570 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5571 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, false);
5572 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5573 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5575 /* Explicitly initialize the bswap libfuncs since we need them to be
5576 valid for things other than word_mode. */
5577 set_optab_libfunc (bswap_optab
, SImode
, "__bswapsi2");
5578 set_optab_libfunc (bswap_optab
, DImode
, "__bswapdi2");
5580 /* Use cabs for double complex abs, since systems generally have cabs.
5581 Don't define any libcall for float complex, so that cabs will be used. */
5582 if (complex_double_type_node
)
5583 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5584 = init_one_libfunc ("cabs");
5586 /* The ffs function operates on `int'. */
5587 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5588 = init_one_libfunc ("ffs");
5590 abort_libfunc
= init_one_libfunc ("abort");
5591 memcpy_libfunc
= init_one_libfunc ("memcpy");
5592 memmove_libfunc
= init_one_libfunc ("memmove");
5593 memcmp_libfunc
= init_one_libfunc ("memcmp");
5594 memset_libfunc
= init_one_libfunc ("memset");
5595 setbits_libfunc
= init_one_libfunc ("__setbits");
5597 #ifndef DONT_USE_BUILTIN_SETJMP
5598 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5599 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5601 setjmp_libfunc
= init_one_libfunc ("setjmp");
5602 longjmp_libfunc
= init_one_libfunc ("longjmp");
5604 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5605 unwind_sjlj_unregister_libfunc
5606 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5608 /* For function entry/exit instrumentation. */
5609 profile_function_entry_libfunc
5610 = init_one_libfunc ("__cyg_profile_func_enter");
5611 profile_function_exit_libfunc
5612 = init_one_libfunc ("__cyg_profile_func_exit");
5614 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5616 if (HAVE_conditional_trap
)
5617 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5619 /* Allow the target to add more libcalls or rename some, etc. */
5620 targetm
.init_libfuncs ();
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        struct optab_handlers *h;

        o = optab_table[i];
        h = &o->handlers[j];

        if (h->libfunc)
          {
            gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (h->libfunc, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          struct optab_handlers *h;

          o = convert_optab_table[i];
          h = &o->handlers[j][k];

          if (h->libfunc)
            {
              gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (h->libfunc, 0));
            }
        }
}
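/* debug_optab_libfuncs is meant to be invoked by hand from a debugger
   (e.g. "call debug_optab_libfuncs ()" under gdb) to inspect which libcall
   names the current target has registered for each optab and mode.  */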
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
5782 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5783 unsigned operators. Do not generate compare instruction. */
5786 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5788 enum rtx_code rcode
;
5790 rtx rtx_op0
, rtx_op1
;
5792 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5793 ensures that condition is a relational operation. */
5794 gcc_assert (COMPARISON_CLASS_P (cond
));
5796 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5797 t_op0
= TREE_OPERAND (cond
, 0);
5798 t_op1
= TREE_OPERAND (cond
, 1);
5800 /* Expand operands. */
5801 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5802 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5804 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5805 && GET_MODE (rtx_op0
) != VOIDmode
)
5806 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5808 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5809 && GET_MODE (rtx_op1
) != VOIDmode
)
5810 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5812 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5815 /* Return insn code for VEC_COND_EXPR EXPR. */
5817 static inline enum insn_code
5818 get_vcond_icode (tree expr
, enum machine_mode mode
)
5820 enum insn_code icode
= CODE_FOR_nothing
;
5822 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5823 icode
= vcondu_gen_code
[mode
];
5825 icode
= vcond_gen_code
[mode
];
5829 /* Return TRUE iff, appropriate vector insns are available
5830 for vector cond expr expr in VMODE mode. */
5833 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5835 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5840 /* Generate insns for VEC_COND_EXPR. */
5843 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5845 enum insn_code icode
;
5846 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5847 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5848 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5850 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5851 if (icode
== CODE_FOR_nothing
)
5854 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5855 target
= gen_reg_rtx (mode
);
5857 /* Get comparison rtx. First expand both cond expr operands. */
5858 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5860 cc_op0
= XEXP (comparison
, 0);
5861 cc_op1
= XEXP (comparison
, 1);
5862 /* Expand both operands and force them in reg, if required. */
5863 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5864 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5865 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
5866 && mode
!= VOIDmode
)
5867 rtx_op1
= force_reg (mode
, rtx_op1
);
5869 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5870 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5871 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
5872 && mode
!= VOIDmode
)
5873 rtx_op2
= force_reg (mode
, rtx_op2
);
5875 /* Emit instruction! */
5876 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5877 comparison
, cc_op0
, cc_op1
));
5883 /* This is an internal subroutine of the other compare_and_swap expanders.
5884 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5885 operation. TARGET is an optional place to store the value result of
5886 the operation. ICODE is the particular instruction to expand. Return
5887 the result of the operation. */
5890 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5891 rtx target
, enum insn_code icode
)
5893 enum machine_mode mode
= GET_MODE (mem
);
5896 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5897 target
= gen_reg_rtx (mode
);
5899 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5900 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5901 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5902 old_val
= force_reg (mode
, old_val
);
5904 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5905 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5906 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5907 new_val
= force_reg (mode
, new_val
);
5909 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5910 if (insn
== NULL_RTX
)
5917 /* Expand a compare-and-swap operation and return its value. */
5920 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5922 enum machine_mode mode
= GET_MODE (mem
);
5923 enum insn_code icode
= sync_compare_and_swap
[mode
];
5925 if (icode
== CODE_FOR_nothing
)
5928 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
	break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
	 against an old value.  */
      if (MEM_P (old_val))
	old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
	return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode cmode = insn_data[icode].operand[0].mode;
	  rtx insn;

	  subtarget = target;
	  if (!insn_data[icode].operand[0].predicate (target, cmode))
	    subtarget = gen_reg_rtx (cmode);

	  insn = GEN_FCN (icode) (subtarget);
	  if (insn)
	    {
	      emit_insn (insn);
	      if (GET_MODE (target) != GET_MODE (subtarget))
		{
		  convert_move (target, subtarget, 1);
		  subtarget = target;
		}
	      return subtarget;
	    }
	}
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
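
/* Illustration only: the boolean result produced above is equivalent to
   comparing the value form of the swap against the expected value, i.e.
   roughly

     ok = (value_compare_and_swap (mem, old_val, new_val) == old_val);

   which is also why OLD_VAL is forced out of memory before the fallback
   path: the comparison must not reread a location that the swap itself
   may just have changed.  */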
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget != NULL_RTX)
	{
	  gcc_assert (subtarget == cmp_reg);
	  break;
	}

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget == NULL_RTX)
	return false;
      if (subtarget != cmp_reg)
	emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing)
	{
	  icode = sync_add_optab[mode];
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return const0_rtx;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	{
	  old_code = sync_old_add_optab[mode];
	  new_code = sync_new_add_optab[mode];
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
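  /* For instance (illustration only): if only the "new value" form of
     fetch-and-add exists but the caller asked for the value BEFORE the
     operation, the old value can be recovered by undoing the addition,

	old = new - val

     which is why the new-value form is only usable here for the reversible
     codes PLUS, MINUS and XOR; an AND or IOR result cannot be undone.
     Conversely, an "old value" result can always be turned into the new
     value by simply redoing the operation with ordinary arithmetic.  */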
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  icode = old_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  icode = new_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);

	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (!after)
		{
		  if (code == PLUS)
		    code = MINUS;
		  else if (code == MINUS)
		    code = PLUS;
		}

	      if (code == NOT)
		target = expand_simple_unop (mode, NOT, target,
					     NULL_RTX, true);
	      target = expand_simple_binop (mode, code, target, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
	emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value stored in
   MEM is target defined.  TARGET is an optional place to stick the return
   value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return target;
	}
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
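
#if 0
/* Illustrative sketch only, never compiled: the value semantics of the
   exchange implemented above, written as plain C for a hypothetical
   word-sized type.  The real expansion performs the load and store as one
   atomic operation; note also that some targets restrict VAL to 1.  */
static unsigned long
lock_test_and_set_semantics (unsigned long *mem, unsigned long val)
{
  unsigned long prev = *mem;	/* previous contents, returned to the caller */
  *mem = val;			/* unconditionally store the new value */
  return prev;
}
#endif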
#include "gt-optabs.h"