/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"

/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;

static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Current libcall id.  It doesn't matter what these are, as long
   as they are unique to each libcall that is emitted.  */
static HOST_WIDE_INT libcall_id = 0;

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Info about libfunc.  We use same hashtable for normal optabs and conversion
   optab.  In the first case mode2 is unused.  */
struct libfunc_entry GTY(())
{
  size_t optab;
  enum machine_mode mode1, mode2;
  rtx libfunc;
};

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;

/* Used for attribute_hash.  */
static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}

/* Used for optab_hash.  */
static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}

/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed,
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (convert_optab_table[0] - optab);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}

/* Return the libfunc corresponding to the operation defined by OPTAB in MODE.
   Trigger lazy initialization if needed, return NULL if no libfunc is
   available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab_table[0] - optab);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
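
/* For instance (illustrative only): if CODE is PLUS and the last insn sets
   TARGET in SImode from OP0 and OP1, the note attached below is the rtx

     (plus:SI (reg:SI op0) (reg:SI op1))

   recorded as a REG_EQUAL note, stating that TARGET equals OP0 + OP1
   regardless of how the preceding sequence computed it.  */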
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
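
/* For example (illustrative only): when widening a QImode operand to SImode
   for an AND or IOR, NO_EXTEND can be nonzero, because the unspecified
   high-order bits of a paradoxical SUBREG cannot influence the low-order
   byte of the result; a logical right shift, by contrast, moves those
   high-order bits down into the result, so a real extension is required.  */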
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
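
/* A typical use (illustrative sketch only):

     optab binop = optab_for_tree_code (PLUS_EXPR, TREE_TYPE (lhs));
     if (binop
         && optab_handler (binop, TYPE_MODE (TREE_TYPE (lhs)))->insn_code
            != CODE_FOR_nothing)
       ... the target has a named pattern for this operation ...

   Callers must still cope with tree codes for which NULL is returned.  */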
optab
optab_for_tree_code (enum tree_code code, const_tree type)
{
  bool trapv;
  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}

/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops   OP0     OP1     WIDE_OP
   widening-sum                  2      oprnd0  -       oprnd1
   widening-dot-product          3      oprnd0  oprnd1  oprnd2
   widening-mult                 2      oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1      oprnd0  -       -   */
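
/* A concrete instance (illustrative only; the vector modes depend on the
   target): for a widening dot-product over 16-bit elements accumulated into
   32-bit lanes, OP0 and OP1 would be V8HImode vectors and WIDE_OP the
   V4SImode accumulator, so after the operands are converted to the modes the
   insn expects the expansion below reduces to roughly

     temp = gen_reg_rtx (V4SImode);
     pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
     emit_insn (pat);  */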
rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_OPERAND_LENGTH (exp);

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
          && xmode1 != VOIDmode)
        xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
        {
          if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
        {
          if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (optab_handler (ternary_optab, mode)->insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) optab_handler (shift_optab, mode)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_normal (vec_oprnd);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_normal (shift_oprnd);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
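
/* Worked example (illustrative only): on a target with 32-bit words, a left
   shift of a 64-bit value by a count N >= 32 reduces to

     high_result = low_input << (N - 32);
     low_result  = 0;

   and for an arithmetic right shift the vacated half is filled with copies
   of the sign bit rather than zeros, as the code below arranges.  */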
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
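      /* For instance, with BITS_PER_WORD == 32 and OP1 == 5, the two shifts
         below move OUTOF_INPUT by 1 and then by 26 positions (~5 & 31 == 26
         when counts are truncated), i.e. by 27 == 32 - 5 in total, without
         ever using a full-word shift count.  */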
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
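
/* For example (illustrative only), with 32-bit words a variable left shift
   of the pair {high, low} by a subword count N (0 < N < 32) expands to

     high_result = (high << N) | (low >> (32 - N));
     low_result  = low << N;

   whereas counts of 32 or more are handled by expand_superword_shift above.
   The code below picks between the two cases at compile time when it can,
   and otherwise with conditional moves or a branch.  */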
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                        _______________________
                        [__op0_high_|__op0_low__]
                        _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________

                        _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)             [__op0_low__*__op1_high_]
                     _______________________
    (2b)             [__op0_high_*__op1_low__]
         _______________________
    (3)  [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift (giving 0 or -1)
   and subtract the result instead.  */
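
/* Worked example (illustrative only), with 32-bit words: writing
   op0 = op0_high * 2^32 + op0_low and likewise for op1, the low 64 bits of
   the product are

     op0_low * op1_low                                       (1)
     + ((op0_low * op1_high + op0_high * op1_low) << 32)     (2a + 2b)

   so only (1) needs a widening 32x32->64 multiply, while (2a) and (2b)
   contribute only their low-order words.  */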
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as an operand to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          rtx x, bool unsignedp)
{
  if (optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  int icode = (int) optab_handler (binoptab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode tmp_mode;
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx temp;
  rtx swap;

  if (target)
    temp = target;
  else
    temp = gen_reg_rtx (mode);

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
      && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (xop0) != VOIDmode
                          ? GET_MODE (xop0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (xop1) != VOIDmode
                          ? GET_MODE (xop1)
                          : mode,
                          xop1, unsignedp);

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        return 0;
    }
  else
    tmp_mode = mode;

  if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
    temp = gen_reg_rtx (tmp_mode);

  pat = GEN_FCN (icode) (temp, xop0, xop1);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return temp;
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
1515 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1516 rtx target
, int unsignedp
, enum optab_methods methods
)
1518 enum optab_methods next_methods
1519 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1520 ? OPTAB_WIDEN
: methods
);
1521 enum mode_class
class;
1522 enum machine_mode wider_mode
;
1525 rtx entry_last
= get_last_insn ();
1528 class = GET_MODE_CLASS (mode
);
1530 /* If subtracting an integer constant, convert this into an addition of
1531 the negated constant. */
1533 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
1535 op1
= negate_rtx (mode
, op1
);
1536 binoptab
= add_optab
;
1539 /* Record where to delete back to if we backtrack. */
1540 last
= get_last_insn ();
1542 /* If we can do it with a three-operand insn, do so. */
1544 if (methods
!= OPTAB_MUST_WIDEN
1545 && optab_handler (binoptab
, mode
)->insn_code
!= CODE_FOR_nothing
)
1547 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1548 unsignedp
, methods
, last
);
1553 /* If we were trying to rotate, and that didn't work, try rotating
1554 the other direction before falling back to shifts and bitwise-or. */
1555 if (((binoptab
== rotl_optab
1556 && optab_handler (rotr_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
1557 || (binoptab
== rotr_optab
1558 && optab_handler (rotl_optab
, mode
)->insn_code
!= CODE_FOR_nothing
))
1559 && class == MODE_INT
)
1561 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1563 unsigned int bits
= GET_MODE_BITSIZE (mode
);
1565 if (GET_CODE (op1
) == CONST_INT
)
1566 newop1
= GEN_INT (bits
- INTVAL (op1
));
1567 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1568 newop1
= negate_rtx (mode
, op1
);
1570 newop1
= expand_binop (mode
, sub_optab
,
1571 GEN_INT (bits
), op1
,
1572 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1574 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1575 target
, unsignedp
, methods
, last
);
1580 /* If this is a multiply, see if we can do a widening operation that
1581 takes operands of this mode and makes a wider mode. */
1583 if (binoptab
== smul_optab
1584 && GET_MODE_WIDER_MODE (mode
) != VOIDmode
1585 && ((optab_handler ((unsignedp
? umul_widen_optab
: smul_widen_optab
),
1586 GET_MODE_WIDER_MODE (mode
))->insn_code
)
1587 != CODE_FOR_nothing
))
1589 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1590 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1591 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1595 if (GET_MODE_CLASS (mode
) == MODE_INT
1596 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1597 GET_MODE_BITSIZE (GET_MODE (temp
))))
1598 return gen_lowpart (mode
, temp
);
1600 return convert_to_mode (mode
, temp
, unsignedp
);
1604 /* Look for a wider mode of the same class for which we think we
1605 can open-code the operation. Check for a widening multiply at the
1606 wider mode as well. */
1608 if (CLASS_HAS_WIDER_MODES_P (class)
1609 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1610 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1611 wider_mode
!= VOIDmode
;
1612 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1614 if (optab_handler (binoptab
, wider_mode
)->insn_code
!= CODE_FOR_nothing
1615 || (binoptab
== smul_optab
1616 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1617 && ((optab_handler ((unsignedp
? umul_widen_optab
1618 : smul_widen_optab
),
1619 GET_MODE_WIDER_MODE (wider_mode
))->insn_code
)
1620 != CODE_FOR_nothing
)))
1622 rtx xop0
= op0
, xop1
= op1
;
1625 /* For certain integer operations, we need not actually extend
1626 the narrow operands, as long as we will truncate
1627 the results to the same narrowness. */
1629 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1630 || binoptab
== xor_optab
1631 || binoptab
== add_optab
|| binoptab
== sub_optab
1632 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1633 && class == MODE_INT
)
1636 xop0
= avoid_expensive_constant (mode
, binoptab
,
1638 if (binoptab
!= ashl_optab
)
1639 xop1
= avoid_expensive_constant (mode
, binoptab
,
1643 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1645 /* The second operand of a shift must always be extended. */
1646 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1647 no_extend
&& binoptab
!= ashl_optab
);
1649 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1650 unsignedp
, OPTAB_DIRECT
);
1653 if (class != MODE_INT
1654 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1655 GET_MODE_BITSIZE (wider_mode
)))
1658 target
= gen_reg_rtx (mode
);
1659 convert_move (target
, temp
, 0);
1663 return gen_lowpart (mode
, temp
);
1666 delete_insns_since (last
);
1670 /* If operation is commutative,
1671 try to make the first operand a register.
1672 Even better, try to make it the same as the target.
1673 Also try to make the last operand a constant. */
1674 if (commutative_optab_p (binoptab
)
1675 && swap_commutative_operands_with_target (target
, op0
, op1
))
1682 /* These can be done a word at a time. */
1683 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1684 && class == MODE_INT
1685 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1686 && optab_handler (binoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
1692 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1693 won't be accurate, so use a new target. */
1694 if (target
== 0 || target
== op0
|| target
== op1
)
1695 target
= gen_reg_rtx (mode
);
1699 /* Do the actual arithmetic. */
1700 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1702 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1703 rtx x
= expand_binop (word_mode
, binoptab
,
1704 operand_subword_force (op0
, i
, mode
),
1705 operand_subword_force (op1
, i
, mode
),
1706 target_piece
, unsignedp
, next_methods
);
1711 if (target_piece
!= x
)
1712 emit_move_insn (target_piece
, x
);
1715 insns
= get_insns ();
1718 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1720 if (binoptab
->code
!= UNKNOWN
)
1722 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1723 copy_rtx (op0
), copy_rtx (op1
));
1727 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1732 /* Synthesize double word shifts from single word shifts. */
1733 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1734 || binoptab
== ashr_optab
)
1735 && class == MODE_INT
1736 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1737 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1738 && optab_handler (binoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
1739 && optab_handler (ashl_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
1740 && optab_handler (lshr_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
1742 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1743 enum machine_mode op1_mode
;
1745 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1746 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1747 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1749 /* Apply the truncation to constant shifts. */
1750 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1751 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1753 if (op1
== CONST0_RTX (op1_mode
))
1756 /* Make sure that this is a combination that expand_doubleword_shift
1757 can handle. See the comments there for details. */
1758 if (double_shift_mask
== 0
1759 || (shift_mask
== BITS_PER_WORD
- 1
1760 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1762 rtx insns
, equiv_value
;
1763 rtx into_target
, outof_target
;
1764 rtx into_input
, outof_input
;
1765 int left_shift
, outof_word
;
1767 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1768 won't be accurate, so use a new target. */
1769 if (target
== 0 || target
== op0
|| target
== op1
)
1770 target
= gen_reg_rtx (mode
);
1774 /* OUTOF_* is the word we are shifting bits away from, and
1775 INTO_* is the word that we are shifting bits towards, thus
1776 they differ depending on the direction of the shift and
1777 WORDS_BIG_ENDIAN. */
1779 left_shift
= binoptab
== ashl_optab
;
1780 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1782 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1783 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1785 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1786 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1788 if (expand_doubleword_shift (op1_mode
, binoptab
,
1789 outof_input
, into_input
, op1
,
1790 outof_target
, into_target
,
1791 unsignedp
, next_methods
, shift_mask
))
1793 insns
= get_insns ();
1796 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1797 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1804 /* Synthesize double word rotates from single word shifts. */
1805 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1806 && class == MODE_INT
1807 && GET_CODE (op1
) == CONST_INT
1808 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1809 && optab_handler (ashl_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
1810 && optab_handler (lshr_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
1813 rtx into_target
, outof_target
;
1814 rtx into_input
, outof_input
;
1816 int shift_count
, left_shift
, outof_word
;
1818 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1819 won't be accurate, so use a new target. Do this also if target is not
1820 a REG, first because having a register instead may open optimization
1821 opportunities, and second because if target and op0 happen to be MEMs
1822 designating the same location, we would risk clobbering it too early
1823 in the code sequence we generate below. */
1824 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1825 target
= gen_reg_rtx (mode
);
1829 shift_count
= INTVAL (op1
);
1831 /* OUTOF_* is the word we are shifting bits away from, and
1832 INTO_* is the word that we are shifting bits towards, thus
1833 they differ depending on the direction of the shift and
1834 WORDS_BIG_ENDIAN. */
1836 left_shift
= (binoptab
== rotl_optab
);
1837 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1839 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1840 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1842 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1843 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1845 if (shift_count
== BITS_PER_WORD
)
1847 /* This is just a word swap. */
1848 emit_move_insn (outof_target
, into_input
);
1849 emit_move_insn (into_target
, outof_input
);
1854 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1855 rtx first_shift_count
, second_shift_count
;
1856 optab reverse_unsigned_shift
, unsigned_shift
;
1858 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1859 ? lshr_optab
: ashl_optab
);
1861 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1862 ? ashl_optab
: lshr_optab
);
1864 if (shift_count
> BITS_PER_WORD
)
1866 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1867 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1871 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1872 second_shift_count
= GEN_INT (shift_count
);
1875 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1876 outof_input
, first_shift_count
,
1877 NULL_RTX
, unsignedp
, next_methods
);
1878 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1879 into_input
, second_shift_count
,
1880 NULL_RTX
, unsignedp
, next_methods
);
1882 if (into_temp1
!= 0 && into_temp2
!= 0)
1883 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1884 into_target
, unsignedp
, next_methods
);
1888 if (inter
!= 0 && inter
!= into_target
)
1889 emit_move_insn (into_target
, inter
);
1891 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1892 into_input
, first_shift_count
,
1893 NULL_RTX
, unsignedp
, next_methods
);
1894 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1895 outof_input
, second_shift_count
,
1896 NULL_RTX
, unsignedp
, next_methods
);
1898 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1899 inter
= expand_binop (word_mode
, ior_optab
,
1900 outof_temp1
, outof_temp2
,
1901 outof_target
, unsignedp
, next_methods
);
1903 if (inter
!= 0 && inter
!= outof_target
)
1904 emit_move_insn (outof_target
, inter
);
1907 insns
= get_insns ();
1912 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1913 block to help the register allocator a bit. But a multi-word
1914 rotate will need all the input bits when setting the output
1915 bits, so there clearly is a conflict between the input and
1916 output registers. So we can't use a no-conflict block here. */
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  /* Store carry from main add/subtract.  */
	  carry_out = gen_reg_rtx (word_mode);
	  carry_out = emit_store_flag_force (carry_out,
					     (binoptab == add_optab
					      ? LT : GT),
					     x, op0_piece,
					     word_mode, 1, normalizep);

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < (int) nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx temp = emit_move_insn (target, xtarget);

	      set_unique_reg_note (temp, REG_EQUAL,
				   gen_rtx_fmt_ee (binoptab->code, mode,
						   copy_rtx (op0),
						   copy_rtx (op1)));
	    }
	  else
	    target = xtarget;

	  return target;
	}

      delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (optab_handler (umul_widen_optab, mode)->insn_code
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && optab_handler (smul_widen_optab, mode)->insn_code
	     != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
	    {
	      temp = emit_move_insn (target ? target : product, product);
	      set_unique_reg_note (temp, REG_EQUAL,
				   gen_rtx_fmt_ee (MULT, mode,
						   copy_rtx (op0),
						   copy_rtx (op1)));
	    }
	  return product;
	}
    }
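  /* Illustrative note (editorial): expand_doubleword_mult builds the 2N-bit
     product from N-bit pieces.  Writing op0 = h0*2^N + l0 and op1 = h1*2^N + l1,

	 op0 * op1 mod 2^(2N) = (l0*l1) + ((h0*l1 + l0*h1) << N)

     so one widening multiply of the low words plus ordinary word multiplies
     and additions for the high half are enough.  */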
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((optab_handler (binoptab, wider_mode)->insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (class != MODE_INT
		      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
						 GET_MODE_BITSIZE (wider_mode)))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;
  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (unoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (binoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (binoptab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */

static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (clz_optab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */

static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx hi0_label = gen_label_rtx ();
  rtx after_label = gen_label_rtx ();
  rtx seq, temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       GEN_INT (GET_MODE_BITSIZE (word_mode)),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
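/* Illustrative note (editorial): for a 64-bit value made of 32-bit words,

	 clz64 (x) = clz32 (hi)        when hi != 0
	 clz64 (x) = 32 + clz32 (lo)   when hi == 0

   e.g. clz64 (0x00000000_00F00000) = 32 + 8 = 40, which is what the branch
   over hi0_label computes.  */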
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */

static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx x, last;

  if (!CLASS_HAS_WIDER_MODES_P (class))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      size_int (GET_MODE_BITSIZE (wider_mode)
				- GET_MODE_BITSIZE (mode)),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return x;
}
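/* Illustrative note (editorial): byte-swapping in a wider mode leaves the
   interesting bytes in the high part, so they must be shifted back down.
   With a 32-bit bswap used for a 16-bit value, bswap32 (0x00001234) =
   0x34120000, and a logical right shift by 32 - 16 = 16 yields 0x3412,
   the desired bswap16 result.  */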
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0)
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
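/* Illustrative note (editorial): a double-word byte swap both swaps the two
   words and byte-swaps each of them.  With 32-bit words,
   bswap64 (0x01234567_89ABCDEF) = 0xEFCDAB89_67452301, i.e. the swapped low
   word 0xEFCDAB89 becomes the new high word, matching the t0/t1 placement
   above.  */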
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (popcount_optab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
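/* Illustrative note (editorial): parity is simply the low bit of popcount.
   For 0xF1 the population count is 5, and 5 & 1 = 1, so the parity is odd;
   widening the operand with zero extension adds no set bits, which is why a
   wider popcount can be used unchanged.  */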
/* Try calculating ctz(x) as K - clz(x & -x) ,
   where K is GET_MODE_BITSIZE(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx seq, temp;

  if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 GEN_INT (GET_MODE_BITSIZE (mode) - 1), temp,
			 target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
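/* Illustrative note (editorial): x & -x isolates the lowest set bit, so its
   clz determines the position of that bit from the top.  For a 32-bit
   x = 0x00000028, x & -x = 0x00000008, clz = 28, and 31 - 28 = 3 = ctz (x).
   For x = 0 the AND is 0 and, with the usual clz (0) = 32, the result is
   31 - 32 = -1, the value expand_ffs relies on.  */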
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */

static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp, seq;

  if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_BITSIZE (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
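/* Illustrative note (editorial): ffs is a 1-based ctz with ffs (0) = 0.
   The sequence above computes ctz (or its clz-based substitute), patches the
   value to -1 when the operand is zero, and then adds 1:  for x = 0x50,
   ctz = 4 and ffs = 5;  for x = 0, the patched -1 becomes 0.  */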
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (nwords > 1)
    {
      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_const (lo, hi, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_const (lo, hi, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
			   gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
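/* Illustrative note (editorial): for IEEE single precision the sign is bit
   31, so with mask = 0x80000000

	 neg (x) = x ^ mask	 abs (x) = x & ~mask

   e.g. 1.0f = 0x3F800000 XORs to 0xBF800000 = -1.0f.  The multi-word loop
   above applies the mask only to the word holding the sign bit and copies
   the other words through unchanged.  */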
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (unoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat, temp;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return temp;
	}
      else
	delete_insns_since (last);
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_clz (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      temp = widen_bswap (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_bswap (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (class))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && class == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (class != MODE_INT
		    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					       GET_MODE_BITSIZE (wider_mode)))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  temp = expand_absneg_bit (NEG, mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == popcount_optab || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unoptab->code, outmode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((optab_handler (unoptab, wider_mode)->insn_code
	       != CODE_FOR_nothing)
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */

	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab)
				    && class == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  */
	      if (unoptab == clz_optab && temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);

	      if (temp)
		{
		  if (class != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
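/* Illustrative note (editorial): the shift/xor/subtract form works because
   the arithmetic shift produces 0 for non-negative x and -1 (all ones) for
   negative x.  With s = x >> 31 (32-bit x), (x ^ s) - s leaves non-negative
   values unchanged and maps x = -5 to ((-5 ^ -1) - -1) = 4 + 1 = 5.  */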
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (op1);

  return target;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  int icode;
  rtx sign, label;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = (int) signbit_optab->handlers [(int) mode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      HOST_WIDE_INT hi, lo;

      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  imode = int_mode_for_mode (mode);
	  if (imode == BLKmode)
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      if (bitpos < HOST_BITS_PER_WIDE_INT)
	{
	  hi = 0;
	  lo = (HOST_WIDE_INT) 1 << bitpos;
	}
      else
	{
	  hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
	  lo = 0;
	}

      sign = gen_reg_rtx (imode);
      sign = expand_binop (imode, and_optab, op1,
			   immed_double_const (lo, hi, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
    }

  if (target == NULL_RTX)
    target = copy_to_reg (op0);
  else
    emit_move_insn (target, op0);

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece = expand_binop (imode, and_optab, op0_piece,
					  immed_double_const (~lo, ~hi, imode),
					  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_double_const (lo, hi, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_double_const (lo, hi, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_double_const (~lo, ~hi, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
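/* Illustrative note (editorial): in integer form copysign is a mask merge.
   For IEEE single precision with sign mask 0x80000000,

	 copysign (x, y) = (x & 0x7FFFFFFF) | (y & 0x80000000)

   so copysign (1.0f, -2.0f): 0x3F800000 & 0x7FFFFFFF = 0x3F800000, and
   0xC0000000 & 0x80000000 = 0x80000000, giving 0xBF800000 = -1.0f.  */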
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
   Set P->must_stay if the currently examined clobber / store has to stay
   in the list of insns that constitute the actual no_conflict block /
   libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Encapsulate the block starting at FIRST and ending with LAST, which is
   logically equivalent to EQUIV, so it gets manipulated as a unit if it
   is possible to do so.  */

static void
maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
{
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
	 encapsulated region would not be in one basic block, i.e. when
	 there is a control_flow_insn_p insn between FIRST and LAST.  */
      bool attach_libcall_retval_notes = true;
      rtx insn, next = NEXT_INSN (last);

      for (insn = first; insn != next; insn = NEXT_INSN (insn))
	if (control_flow_insn_p (insn))
	  {
	    attach_libcall_retval_notes = false;
	    break;
	  }

      if (attach_libcall_retval_notes)
	{
	  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
						 REG_NOTES (first));
	  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
						REG_NOTES (last));
	  next = NEXT_INSN (last);
	  for (insn = first; insn != next; insn = NEXT_INSN (insn))
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
						  GEN_INT (libcall_id),
						  REG_NOTES (insn));
	}
    }
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    if (!NONJUMP_INSN_P (insn)
	|| find_reg_note (insn, REG_LIBCALL, NULL_RTX))
      return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions on their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
	remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  add_insn (insn);
	}
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && REG_P (op1))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && REG_P (op0))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (optab_handler (mov_optab, GET_MODE (target))->insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
	set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	    if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	      remove_note (insn, note);
	  }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0)
	    XEXP (note, 0) = constm1_rtx;
	  else
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
						  REG_NOTES (insn));
	}

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions on their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (optab_handler (mov_optab, GET_MODE (target))->insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  rtx libfunc;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = cmpmem_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstr_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstrn_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((GET_CODE (size) == CONST_INT
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *px = result;
          *py = const0_rtx;
          *pmode = result_mode;
          return;
        }

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
                                        result_mode, 3,
                                        XEXP (x, 0), Pmode,
                                        XEXP (y, 0), Pmode,
                                        size, cmp_mode);

      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  libfunc = optab_libfunc (cmp_optab, mode);
  if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      rtx ulibfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      ulibfunc = optab_libfunc (ucmp_optab, mode);

      if (unsignedp && ulibfunc)
        libfunc = ulibfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
                                        targetm.libgcc_cmp_return_mode (),
                                        2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent unsigned comparisons against zero.  */
      *px = result;
      *py = const1_rtx;
      if (!TARGET_LIB_INT_CMP_BIASED)
        *px = plus_constant (result, 1);

      return;
    }

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
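
/* Illustrative note: when the integer libcall path above is taken, LIBFUNC
   normally resolves to a libgcc routine such as __cmpdi2 (biased: it returns
   0, 1 or 2 for less-than, equal and greater-than) or __ucmpdi2 for the
   unsigned variant, and the caller then compares that result against the
   constant chosen above.  */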
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    {
      if (reload_completed)
        return NULL_RTX;
      x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
        {
          icode = optab_handler (cbranch_optab, wider_mode)->insn_code;

          if (icode != CODE_FOR_nothing
              && insn_data[icode].operand[0].predicate (test, wider_mode))
            {
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
              return;
            }
        }

      /* Handle some compares against zero.  */
      icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      if (!CLASS_HAS_WIDER_MODES_P (class))
        break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, callers are required to pass
         operands in an order conforming to canonical RTL.  We relax this
         for commutative comparisons so callers using EQ don't need to do
         swapping by hand.  */
      gcc_assert (label || (comparison == swap_condition (comparison)));

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
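
/* Usage sketch (illustrative): to branch to LABEL when pseudo X is negative,
   an expander can simply call

     emit_cmp_and_jump_insns (x, const0_rtx, LT, NULL_RTX, SImode, 0, label);

   which performs all of the setup above (including a possible libcall) and
   emits the conditional jump in one step.  */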
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode, cmp_mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
        break;

      if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
        {
          rtx tmp;
          tmp = x; x = y; y = tmp;
          comparison = swapped;
          break;
        }

      if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
          && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        {
          rtx true_rtx, false_rtx;

          switch (comparison)
            {
            case EQ:
              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;
              break;

            case NE:
              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;
              break;

            case GT:
              true_rtx = const1_rtx;
              false_rtx = const0_rtx;
              break;

            case GE:
              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;
              break;

            case LT:
              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;
              break;

            case LE:
              true_rtx = const0_rtx;
              false_rtx = const1_rtx;
              break;

            default:
              gcc_unreachable ();
            }
          equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                        equiv, true_rtx, false_rtx);
        }
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pcomparison = comparison;
}
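
/* Illustrative note: the libfuncs located by the loop above are the
   soft-float comparison routines from libgcc, e.g. __eqdf2, __ltdf2 and
   __gtdf2 for DFmode or __unorddf2 for UNORDERED; the REG_EQUAL note built
   here lets later passes fold the whole call when both operands are known
   at compile time.  */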
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  subtarget = target;

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
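
/* Usage sketch (illustrative): on a target whose movMODEcc pattern is
   available,

     emit_conditional_move (target, GT, a, b, SImode, x, y, SImode, 0)

   emits a compare of A and B followed by a conditional move, so TARGET ends
   up equal to X when A > B and to Y otherwise; NULL_RTX is returned when the
   operands cannot be handled.  */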
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode)->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC patterns
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  For decimal float values we
     do this only if we have already determined that a signed conversion
     provides sufficient accuracy.  */
  if (unsignedp && (can_do_signed || !DECIMAL_FLOAT_MODE_P (GET_MODE (to))))
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
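
/* Worked example (illustrative): converting the unsigned SImode value
   0x80000000 to DFmode when only a signed SImode->DFmode insn exists.  The
   fallback above converts as signed, giving -2147483648.0, and then adds
   2**32 because FROM was negative, producing the expected 2147483648.0.
   When no floating mode wide enough exists, the other path instead converts
   (from >> 1) | (from & 1) and doubles the result, so only a bit that could
   not have been represented anyway is lost.  */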
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
          && (!DECIMAL_FLOAT_MODE_P (fmode)
              || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1, fmode);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (optab_handler (mov_optab, GET_MODE (to))->insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
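
/* Worked example (illustrative): fixing a DFmode value to unsigned SImode
   with only a signed fix available.  For an input of 3e9 the compare against
   LIMIT = 2**31 takes the fixup path: 3e9 - 2**31 = 852516352.0 is fixed to
   852516352, and XORing in the 0x80000000 bit gives back 3000000000, i.e.
   the subtraction and the final XOR together re-add the 2**31 that was
   removed before the signed conversion.  */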
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        icode = convert_optab_handler (tab, imode, fmode)->insn_code;
        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from, UNKNOWN);
            if (target != to)
              convert_move (to, target, 0);
            return true;
          }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (optab_handler (code_to_optab[(int) code], mode)->insn_code
              != CODE_FOR_nothing));
}
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = xcalloc (sizeof (struct optab), 1);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    optab_handler (op, i)->insn_code = CODE_FOR_nothing;

  return op;
}

static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = xcalloc (sizeof (struct convert_optab), 1);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      convert_optab_handler (op, i, j)->insn_code = CODE_FOR_nothing;

  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */
static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;

  set_optab_libfunc (optable, mode,
                     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
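
/* Illustrative examples of the names produced: a call like
   gen_libfunc (add_optab, "add", '3', DFmode) yields "__adddf3", and
   gen_libfunc (smul_optab, "mul", '3', SImode) yields "__mulsi3", matching
   the arithmetic routines exported by libgcc.  */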
/* Like gen_libfunc, but verify that an integer operation is involved.  */

static void
gen_int_libfunc (optab optable, const char *opname, char suffix,
                 enum machine_mode mode)
{
  int maxsize = 2 * BITS_PER_WORD;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  if (GET_MODE_CLASS (mode) != MODE_INT
      || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
    return;
  gen_libfunc (optable, opname, suffix, mode);
}

/* Like gen_libfunc, but verify that an FP operation is involved and set
   the decimal prefix if needed.  */

static void
gen_fp_libfunc (optab optable, const char *opname, char suffix,
                enum machine_mode mode)
{
  char *dec_opname;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_libfunc (optable, opname, suffix, mode);
  if (DECIMAL_FLOAT_MODE_P (mode))
    {
      dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
      /* For BID support, change the name to have either a bid_ or dpd_ prefix
         depending on the low level floating format used.  */
      memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
      strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
      gen_libfunc (optable, dec_opname, suffix, mode);
    }
}

/* Like gen_libfunc, but verify that an FP or INT operation is involved.  */

static void
gen_int_fp_libfunc (optab optable, const char *name, char suffix,
                    enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
}
/* Like gen_libfunc, but verify that FP or INT operation is involved
   and add 'v' suffix for integer operation.  */

static void
gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
                     enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      int len = strlen (name);
      char *v_name = alloca (len + 2);
      strcpy (v_name, name);
      v_name[len] = 'v';
      v_name[len + 1] = 0;
      gen_int_libfunc (optable, v_name, suffix, mode);
    }
}
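
/* Illustrative example: with the 'v' appended for integer modes, an optab
   registered here under the name "add" gets libfuncs such as __addvsi3, the
   overflow-trapping addition routine in libgcc, while the FP modes keep the
   plain __adddf3-style names.  */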
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
gen_interclass_conv_libfunc (convert_optab tab,
                             const char *opname,
                             enum machine_mode tmode,
                             enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  memcpy (&nondec_name[2], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + 2;

  dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2+dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
                    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
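
/* Illustrative examples of the inter-class names produced: "float" plus the
   mode names "si" and "df" gives "__floatsidf", and "fixuns" with "df" and
   "si" gives "__fixunsdfsi"; when a decimal mode is involved the
   DECIMAL_PREFIX is inserted as well, e.g. "__bid_floatsisd" or
   "__dpd_floatsisd" depending on the configured decimal format.  */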
/* Same as gen_interclass_conv_libfunc but verify that we are producing
   int->fp conversion.  */

static void
gen_int_to_fp_conv_libfunc (convert_optab tab,
                            const char *opname,
                            enum machine_mode tmode,
                            enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_INT)
    return;
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* ufloat_optab is special: it uses the "floatun" stem for binary FP and
   "floatuns" for decimal FP in its naming scheme.  */

static void
gen_ufloat_conv_libfunc (convert_optab tab,
                         const char *opname ATTRIBUTE_UNUSED,
                         enum machine_mode tmode,
                         enum machine_mode fmode)
{
  if (DECIMAL_FLOAT_MODE_P (tmode))
    gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
  else
    gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
}

/* Same as gen_interclass_conv_libfunc but verify that we are producing
   an int->fp conversion with no decimal floating point involved.  */

static void
gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
                                       const char *opname,
                                       enum machine_mode tmode,
                                       enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_INT)
    return;
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Same as gen_interclass_conv_libfunc but verify that we are producing
   fp->int conversion.  */

static void
gen_fp_to_int_conv_libfunc (convert_optab tab,
                            const char *opname,
                            enum machine_mode tmode,
                            enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (GET_MODE_CLASS (tmode) != MODE_INT)
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are
   similar to the ones for init_libfunc, above.  */

static void
gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
                             enum machine_mode tmode, enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *libfunc_name, *suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  memcpy (&nondec_name[2], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + 2;

  dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2 + dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p++ = '2';
  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
                    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
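
/* Illustrative examples of the intra-class names produced: extending SFmode
   to DFmode yields "__extendsfdf2" and truncating DFmode to SFmode yields
   "__truncdfsf2", both carrying the trailing '2' appended above.  */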
/* Pick proper libcall for trunc_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_trunc_conv_libfunc (convert_optab tab,
                        const char *opname,
                        enum machine_mode tmode,
                        enum machine_mode fmode)
{
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for extend_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_extend_conv_libfunc (convert_optab tab,
                         const char *opname ATTRIBUTE_UNUSED,
                         enum machine_mode tmode,
                         enum machine_mode fmode)
{
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;
  e.optab = (size_t) (optab_table[0] - optable);
  e.mode1 = mode;
  e.mode2 = VOIDmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optab_table[0] - optable);
  (*slot)->mode1 = mode;
  (*slot)->mode2 = VOIDmode;
  (*slot)->libfunc = val;
}
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;
  e.optab = (size_t) (convert_optab_table[0] - optable);
  e.mode1 = tmode;
  e.mode2 = fmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (convert_optab_table[0] - optable);
  (*slot)->mode1 = tmode;
  (*slot)->mode2 = fmode;
  (*slot)->libfunc = val;
}
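
/* Usage sketch (illustrative): a target's init_libfuncs hook can override
   the defaults with calls such as

     set_optab_libfunc (smod_optab, SImode, "__modsi3");
     set_conv_libfunc (sfloat_optab, DFmode, SImode, "__floatsidf");

   and passing 0 for NAME removes the entry so that no library call is
   used for that operation.  */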
5989 /* Call this to initialize the contents of the optabs
5990 appropriately for the current target machine. */
5996 enum machine_mode int_mode
;
5998 libfunc_hash
= htab_create_ggc (10, hash_libfunc
, eq_libfunc
, NULL
);
5999 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6001 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
6002 setcc_gen_code
[i
] = CODE_FOR_nothing
;
6004 #ifdef HAVE_conditional_move
6005 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
6006 movcc_gen_code
[i
] = CODE_FOR_nothing
;
6009 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
6011 vcond_gen_code
[i
] = CODE_FOR_nothing
;
6012 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
6015 add_optab
= init_optab (PLUS
);
6016 addv_optab
= init_optabv (PLUS
);
6017 sub_optab
= init_optab (MINUS
);
6018 subv_optab
= init_optabv (MINUS
);
6019 smul_optab
= init_optab (MULT
);
6020 smulv_optab
= init_optabv (MULT
);
6021 smul_highpart_optab
= init_optab (UNKNOWN
);
6022 umul_highpart_optab
= init_optab (UNKNOWN
);
6023 smul_widen_optab
= init_optab (UNKNOWN
);
6024 umul_widen_optab
= init_optab (UNKNOWN
);
6025 usmul_widen_optab
= init_optab (UNKNOWN
);
6026 smadd_widen_optab
= init_optab (UNKNOWN
);
6027 umadd_widen_optab
= init_optab (UNKNOWN
);
6028 smsub_widen_optab
= init_optab (UNKNOWN
);
6029 umsub_widen_optab
= init_optab (UNKNOWN
);
6030 sdiv_optab
= init_optab (DIV
);
6031 sdivv_optab
= init_optabv (DIV
);
6032 sdivmod_optab
= init_optab (UNKNOWN
);
6033 udiv_optab
= init_optab (UDIV
);
6034 udivmod_optab
= init_optab (UNKNOWN
);
6035 smod_optab
= init_optab (MOD
);
6036 umod_optab
= init_optab (UMOD
);
6037 fmod_optab
= init_optab (UNKNOWN
);
6038 remainder_optab
= init_optab (UNKNOWN
);
6039 ftrunc_optab
= init_optab (UNKNOWN
);
6040 and_optab
= init_optab (AND
);
6041 ior_optab
= init_optab (IOR
);
6042 xor_optab
= init_optab (XOR
);
6043 ashl_optab
= init_optab (ASHIFT
);
6044 ashr_optab
= init_optab (ASHIFTRT
);
6045 lshr_optab
= init_optab (LSHIFTRT
);
6046 rotl_optab
= init_optab (ROTATE
);
6047 rotr_optab
= init_optab (ROTATERT
);
6048 smin_optab
= init_optab (SMIN
);
6049 smax_optab
= init_optab (SMAX
);
6050 umin_optab
= init_optab (UMIN
);
6051 umax_optab
= init_optab (UMAX
);
6052 pow_optab
= init_optab (UNKNOWN
);
6053 atan2_optab
= init_optab (UNKNOWN
);
6055 /* These three have codes assigned exclusively for the sake of
6057 mov_optab
= init_optab (SET
);
6058 movstrict_optab
= init_optab (STRICT_LOW_PART
);
6059 cmp_optab
= init_optab (COMPARE
);
  storent_optab = init_optab (UNKNOWN);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  bswap_optab = init_optab (BSWAP);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  ldexp_optab = init_optab (UNKNOWN);
  scalb_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  copysign_optab = init_optab (UNKNOWN);
  signbit_optab = init_optab (UNKNOWN);

  isinf_optab = init_optab (UNKNOWN);

  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  reduc_smax_optab = init_optab (UNKNOWN);
  reduc_umax_optab = init_optab (UNKNOWN);
  reduc_smin_optab = init_optab (UNKNOWN);
  reduc_umin_optab = init_optab (UNKNOWN);
  reduc_splus_optab = init_optab (UNKNOWN);
  reduc_uplus_optab = init_optab (UNKNOWN);

  ssum_widen_optab = init_optab (UNKNOWN);
  usum_widen_optab = init_optab (UNKNOWN);
  sdot_prod_optab = init_optab (UNKNOWN);
  udot_prod_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_extract_even_optab = init_optab (UNKNOWN);
  vec_extract_odd_optab = init_optab (UNKNOWN);
  vec_interleave_high_optab = init_optab (UNKNOWN);
  vec_interleave_low_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_shl_optab = init_optab (UNKNOWN);
  vec_shr_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);
  vec_widen_umult_hi_optab = init_optab (UNKNOWN);
  vec_widen_umult_lo_optab = init_optab (UNKNOWN);
  vec_widen_smult_hi_optab = init_optab (UNKNOWN);
  vec_widen_smult_lo_optab = init_optab (UNKNOWN);
  vec_unpacks_hi_optab = init_optab (UNKNOWN);
  vec_unpacks_lo_optab = init_optab (UNKNOWN);
  vec_unpacku_hi_optab = init_optab (UNKNOWN);
  vec_unpacku_lo_optab = init_optab (UNKNOWN);
  vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
  vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
  vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
  vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
  vec_pack_trunc_optab = init_optab (UNKNOWN);
  vec_pack_usat_optab = init_optab (UNKNOWN);
  vec_pack_ssat_optab = init_optab (UNKNOWN);
  vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
  vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);

  powi_optab = init_optab (UNKNOWN);
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
  lrint_optab = init_convert_optab (UNKNOWN);
  lround_optab = init_convert_optab (UNKNOWN);
  lfloor_optab = init_convert_optab (UNKNOWN);
  lceil_optab = init_convert_optab (UNKNOWN);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
    }
  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_libfunc;
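  /* Illustrative note, not part of the original initialization code: with
     the settings above, a generic libcall name is conventionally built as
     "__" + libcall_basename + mode suffix + libcall_suffix, so add_optab
     in DImode yields "__adddi3" and, with software floating point, DFmode
     yields "__adddf3".  A plain C addition such as

	 long long add64 (long long a, long long b) { return a + b; }

     falls back to the "__adddi3" libcall on targets that provide no
     adddi3 insn pattern.  */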
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;
  /* Comparison libcalls for integers MUST come in pairs,
     signed and unsigned.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;
  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;
  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* trunc_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    {
      int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
      set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
                         "ffs");
    }
  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
  set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
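  /* Illustrative note, added here for clarity: these entries are what the
     __builtin_bswap32 and __builtin_bswap64 builtins fall back to when the
     target has no bswapsi2/bswapdi2 insn patterns, e.g.

	 unsigned int swap32 (unsigned int x) { return __builtin_bswap32 (x); }

     may end up calling __bswapsi2 from libgcc on such targets.  */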
  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif

  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
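/* Illustrative sketch only, not part of this file: a target's
   TARGET_INIT_LIBFUNCS hook, reached through targetm.init_libfuncs above,
   typically renames entries that were set up generically, for example
   (hypothetical function and libcall names):

     static void
     example_init_libfuncs (void)
     {
       set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");
       set_optab_libfunc (smod_optab, SImode, "__example_modsi3");
     }
*/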
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        rtx l;

        o = optab_table[i];
        l = optab_libfunc (optab_table[i], j);
        if (l)
          {
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (l, 0));
          }
      }
  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          rtx l;

          o = convert_optab_table[i];
          l = convert_optab_libfunc (o, j, k);
          if (l)
            {
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (l, 0));
            }
        }
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (cmp_optab, mode)->insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
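/* Illustrative usage sketch, not part of the original file: a caller such
   as the if-conversion pass conceptually does something like

     rtx seq = gen_cond_trap (EQ, op0, const0_rtx, const0_rtx);
     if (seq)
       emit_insn (seq);

   where the returned sequence is 0 when the target lacks a conditional
   trap pattern or a compare pattern for the mode of OP0.  */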
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
                         EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
                         EXPAND_STACK_PARM);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}
/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in VMODE mode.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
                                   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them in reg, if required.  */
  rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit instruction! */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
                              comparison, cc_op0, cc_op1));

  return target;
}
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
                               rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;
  emit_insn (insn);

  return target;
}
/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
        break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return NULL_RTX;

      /* Ensure that if old_val == mem, we're not comparing
         against an old value.  */
      if (MEM_P (old_val))
        old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
        return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
        {
          enum machine_mode cmode = insn_data[icode].operand[0].mode;
          rtx insn;

          subtarget = target;
          if (!insn_data[icode].operand[0].predicate (target, cmode))
            subtarget = gen_reg_rtx (cmode);

          insn = GEN_FCN (icode) (subtarget);
          if (insn)
            {
              emit_insn (insn);
              if (GET_MODE (target) != GET_MODE (subtarget))
                {
                  convert_move (target, subtarget, 1);
                  subtarget = target;
                }
              return target;
            }
        }
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget != NULL_RTX)
        {
          gcc_assert (subtarget == cmp_reg);
          break;
        }

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget == NULL_RTX)
        return false;
      if (subtarget != cmp_reg)
        emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
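/* Illustrative note, not part of the original file: in C terms the emitted
   RTL behaves roughly like the following sketch (names are hypothetical):

     old = *mem;
     do
       {
	 old_reg = old;
	 ... SEQ computes new_reg from old_reg ...
	 old = compare_and_swap (mem, old_reg, new_reg);
       }
     while (old != old_reg);
*/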
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing)
        {
          icode = sync_add_optab[mode];
          if (icode != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
        {
          emit_insn (insn);
          return const0_rtx;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return const0_rtx;
    }

  return NULL_RTX;
}
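/* Illustrative note, not part of the original file: this expander is used
   when the result of a __sync fetch-and-op builtin is ignored, e.g.

     void inc (int *p) { __sync_fetch_and_add (p, 1); }

   where only the memory side effect is needed, so a plain sync_add
   pattern (or a compare-and-swap loop) suffices.  */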
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation; if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
                             bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
        {
          old_code = sync_old_add_optab[mode];
          new_code = sync_new_add_optab[mode];
          if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
        {
          icode = old_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
          && (code == PLUS || code == MINUS || code == XOR))
        {
          icode = new_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);

          /* If we need to compensate for using an operation with the
             wrong return value, do so now.  */
          if (compensate)
            {
              if (!after)
                {
                  if (code == PLUS)
                    code = MINUS;
                  else if (code == MINUS)
                    code = PLUS;
                }

              if (code == NOT)
                target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
              target = expand_simple_binop (mode, code, target, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
            }

          return target;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
        emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);
      if (after)
        emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
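/* Illustrative note, not part of the original file: the AFTER flag
   distinguishes the two builtin flavours.  For example,

     int fetch_then_add (int *p) { return __sync_fetch_and_add (p, 1); }

   wants the old value (AFTER is false), while

     int add_then_fetch (int *p) { return __sync_add_and_fetch (p, 1); }

   wants the new value (AFTER is true); when only the opposite pattern
   exists, the compensation code above recomputes the missing value.  */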
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will still be 0/1, but the exact value
   stored in MEM is target defined.  TARGET is an optional place to stick
   the return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);
          return target;
        }
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
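/* Illustrative note, not part of the original file: the usual client of
   this expander is the __sync_lock_test_and_set builtin, e.g. a simple
   spinlock acquire such as

     void lock (volatile int *l) { while (__sync_lock_test_and_set (l, 1)) ; }

   which atomically stores 1 and returns the previous contents.  */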
7192 #include "gt-optabs.h"