/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
#if GCC_VERSION >= 4000
__extension__ struct optab optab_table[OTI_MAX]
  = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
      = CODE_FOR_nothing };
#else
/* init_insn_codes will do runtime initialization otherwise.  */
struct optab optab_table[OTI_MAX];
#endif

rtx libfunc_table[LTI_MAX];
/* Tables of patterns for converting one mode to another.  */
#if GCC_VERSION >= 4000
__extension__ struct convert_optab convert_optab_table[COI_MAX]
  = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
	[0 ... NUM_MACHINE_MODES - 1].insn_code
      = CODE_FOR_nothing };
#else
/* init_convert_optab will do runtime initialization otherwise.  */
struct convert_optab convert_optab_table[COI_MAX];
#endif
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
				   enum machine_mode *, int *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Info about libfunc.  We use the same hashtable for normal optabs and
   conversion optabs.  In the first case mode2 is unused.  */
struct libfunc_entry GTY(())
{
  size_t optab;
  enum machine_mode mode1, mode2;
  rtx libfunc;
};

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
/* Used for attribute_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
	  ^ e->optab);
}
/* Used for optab_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
	  && e1->mode1 == e2->mode1
	  && e1->mode2 == e2->mode2);
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
		       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &convert_optab_table[0]);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
	{
	  optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
	  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
	  if (slot)
	    return (*slot)->libfunc;
	  else
	    return NULL;
	}
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   in MODE.  Trigger lazy initialization if needed; return NULL if no
   libfunc is available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
	{
	  optab->libcall_gen (optab, optab->libcall_basename,
			      optab->libcall_suffix, mode);
	  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
							   &e, NO_INSERT);
	  if (slot)
	    return (*slot)->libfunc;
	  else
	    return NULL;
	}
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
	{
	  if (reg_set_p (target, insn))
	    return 0;

	  insn = PREV_INSN (insn);
	}
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
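
/* Illustrative sketch (not part of the original GCC source): the
   "no_extend" case above relies on the fact that, for operations such as
   AND/IOR/XOR, PLUS, MINUS, MULT and left shifts, garbage in the
   high-order bits of the widened inputs only affects the high-order bits
   of the result.  The hypothetical helper below demonstrates that
   property for 8-bit operands widened to 32 bits: the low byte of the
   wide sum equals the 8-bit sum regardless of what sits in the upper
   24 bits.  */

static int
widen_without_extend_example (unsigned char a, unsigned char b)
{
  unsigned int wide_a = (unsigned int) a | 0xabcdef00u;	/* junk high bits */
  unsigned int wide_b = (unsigned int) b | 0x12345600u;	/* junk high bits */
  unsigned int wide_sum = wide_a + wide_b;		/* operate widened */

  /* Truncating back to the narrow width discards the polluted bits,
     so this always returns 1.  */
  return (unsigned char) wide_sum == (unsigned char) (a + b);
}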
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type)
{
  bool trapv;
  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING(type))
	return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
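
/* Illustrative sketch (not part of the original GCC source): the mapping
   above distinguishes wrapping, saturating and trapping flavors of the
   same tree code.  This hypothetical helper shows what the saturating
   unsigned addition selected via usadd_optab computes, here for a 32-bit
   word: on overflow the result clamps to the maximum value instead of
   wrapping around.  */

static unsigned int
usadd_example (unsigned int a, unsigned int b)
{
  unsigned int sum = a + b;	/* wraps modulo 2**32 */
  return sum < a ? ~0u : sum;	/* clamp when the addition wrapped */
}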
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
			   int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_OPERAND_LENGTH (exp);

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
			  GET_MODE (op0) != VOIDmode
			  ? GET_MODE (op0)
			  : tmode0,
			  xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
			    GET_MODE (op1) != VOIDmode
			    ? GET_MODE (op1)
			    : tmode1,
			    xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
			    GET_MODE (wide_op) != VOIDmode
			    ? GET_MODE (wide_op)
			    : wmode,
			    wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
	  && xmode1 != VOIDmode)
	xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
	{
	  if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
	      && wxmode != VOIDmode)
	    wxop = copy_to_mode_reg (wxmode, wxop);

	  pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
	}
      else
	pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
	{
	  if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
	      && wxmode != VOIDmode)
	    wxop = copy_to_mode_reg (wxmode, wxop);

	  pat = GEN_FCN (icode) (temp, xop0, wxop);
	}
      else
	pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}
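
/* Illustrative sketch (not part of the original GCC source): for the
   second class of operations handled above (WIDEN_SUM_EXPR,
   VEC_DOT_PROD_EXPR), WIDE_OP is an accumulator that is as wide as the
   result.  This hypothetical scalar version of a widening sum shows the
   shape of the computation: narrow elements are widened before being
   added into the wide accumulator.  */

static unsigned int
widening_sum_example (const unsigned short *oprnd0, int n, unsigned int wide_op)
{
  int i;

  for (i = 0; i < n; i++)
    wide_op += (unsigned int) oprnd0[i];	/* widen, then accumulate */
  return wide_op;
}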
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (optab_handler (ternary_optab, mode)->insn_code
	      != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
			  GET_MODE (op0) != VOIDmode
			  ? GET_MODE (op0)
			  : mode,
			  xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
			  GET_MODE (op1) != VOIDmode
			  ? GET_MODE (op1)
			  : mode,
			  xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
			  GET_MODE (op2) != VOIDmode
			  ? GET_MODE (op2)
			  : mode,
			  xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
      case VEC_RSHIFT_EXPR:
	shift_optab = vec_shr_optab;
	break;
      case VEC_LSHIFT_EXPR:
	shift_optab = vec_shl_optab;
	break;
      default:
	gcc_unreachable ();
    }

  icode = (int) optab_handler (shift_optab, mode)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_normal (vec_oprnd);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_normal (shift_oprnd);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  gcc_assert (pat);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_double_const (-1, -1, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
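
/* Illustrative sketch (not part of the original GCC source): for a left
   shift by 0 < OP1 < BITS_PER_WORD, the routine above effectively computes

     into_target  = (into_input << op1)
		    | (outof_input >> (BITS_PER_WORD - op1));
     outof_target = outof_input << op1;

   i.e. the bits that leave the OUTOF half "carry" into the INTO half.
   The hypothetical helper below spells that out for a two-word value made
   of 32-bit words, with the low word playing the role of OUTOF_* and the
   high word the role of INTO_* for a left shift.  (The real code above
   also has to dodge shifting by exactly BITS_PER_WORD, which this simple
   sketch does not.)  */

static void
subword_shift_left_example (unsigned int *high, unsigned int *low,
			    unsigned int op1 /* assumed 0 < op1 < 32 */)
{
  unsigned int carries = *low >> (32 - op1);	/* bits leaving the low word */

  *high = (*high << op1) | carries;		/* INTO half picks them up */
  *low = *low << op1;				/* plain shift for OUTOF half */
}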
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).   Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					  cmp_code, cmp1, cmp2,
					  outof_input, into_input,
					  op1, superword_op1,
					  outof_target, into_target,
					  unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
			    * [__op1_high_|__op1_low__]
	   _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		   _______________________
    (2a)	  [__op0_low__*__op1_high_]
		   _______________________
    (2b)	  [__op0_high_*__op1_low__]
	_______________________
    (3) [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 REG_P (product_high) ? product_high : adjust,
			 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
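
/* Illustrative sketch (not part of the original GCC source): the
   decomposition implemented above, written out for a doubleword built
   from two 32-bit words (so this assumes a 32-bit unsigned int and a
   64-bit unsigned long long).  Only partial products (1), (2a) and (2b)
   contribute to the low two words of the result; (1) needs a widening
   multiply, while (2a) and (2b) can use ordinary word-mode multiplies
   because only their low words are kept.  */

static unsigned long long
doubleword_mult_example (unsigned int op0_low, unsigned int op0_high,
			 unsigned int op1_low, unsigned int op1_high)
{
  /* (1): unsigned widening multiply of the low words.  */
  unsigned long long product = (unsigned long long) op0_low * op1_low;
  /* (2a) + (2b): non-widening multiplies; their high parts would fall
     outside the doubleword result anyway.  */
  unsigned int adjust = op0_high * op1_low + op0_low * op1_high;

  /* Add the adjustment into the high word of the product.  */
  return product + ((unsigned long long) adjust << 32);
}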
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as an operand to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
			  rtx x, bool unsignedp)
{
  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_CODE (x) == CONST_INT)
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx last)
{
  int icode = (int) optab_handler (binoptab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode tmp_mode;
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx temp;
  rtx swap;

  if (target)
    temp = target;
  else
    temp = gen_reg_rtx (mode);

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
      && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
			  GET_MODE (xop0) != VOIDmode
			  ? GET_MODE (xop0)
			  : mode,
			  xop0, unsignedp);

  if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
			  GET_MODE (xop1) != VOIDmode
			  ? GET_MODE (xop1)
			  : mode,
			  xop1, unsignedp);

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	return 0;
    }
  else
    tmp_mode = mode;

  if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
    temp = gen_reg_rtx (tmp_mode);

  pat = GEN_FCN (icode) (temp, xop0, xop1);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return temp;
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
				    unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
      && class == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_BITSIZE (mode);

      if (GET_CODE (op1) == CONST_INT)
	newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
	newop1 = negate_rtx (mode, op1);
      else
	newop1 = expand_binop (mode, sub_optab,
			       GEN_INT (bits), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
			  GET_MODE_WIDER_MODE (mode))->insn_code)
	  != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					GET_MODE_BITSIZE (GET_MODE (temp))))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& ((optab_handler ((unsignedp ? umul_widen_optab
				     : smul_widen_optab),
				    GET_MODE_WIDER_MODE (wider_mode))->insn_code)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& class == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (class != MODE_INT
		    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					       GET_MODE_BITSIZE (wider_mode)))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    {
      temp = op1;
      op1 = op0;
      op0 = temp;
    }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  if (binoptab->code != UNKNOWN)
	    equiv_value
	      = gen_rtx_fmt_ee (binoptab->code, mode,
				copy_rtx (op0), copy_rtx (op1));
	  else
	    equiv_value = 0;

	  emit_no_conflict_block (insns, target, op0, op1, equiv_value);
	  return target;
	}
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx insns, equiv_value;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0 || target == op0 || target == op1)
	    target = gen_reg_rtx (mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
	      emit_no_conflict_block (insns, target, op0, op1, equiv_value);
	      return target;
	    }
	  end_sequence ();
	}
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target. Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	    }
	  else
	    {
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);
	    }

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
	     block to help the register allocator a bit.  But a multi-word
	     rotate will need all the input bits when setting the output
	     bits, so there clearly is a conflict between the input and
	     output registers.  So we can't use a no-conflict block here.  */
	  emit_insn (insns);
	  return target;
	}
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two possible carries together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx temp = emit_move_insn (target, xtarget);

	      set_unique_reg_note (temp,
				   REG_EQUAL,
				   gen_rtx_fmt_ee (binoptab->code, mode,
						   copy_rtx (op0),
						   copy_rtx (op1)));
	    }
	  else
	    target = xtarget;

	  return target;
	}
      else
	delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */
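  /* For illustration only (not target-specific code): with BITS_PER_WORD
     == 32 and op0 = 2^32*h0 + l0, op1 = 2^32*h1 + l1, the low DImode
     product is

	 l0*l1 + 2^32 * (low_part (h0*l1) + low_part (l0*h1))

     so one widening word multiply plus two ordinary word multiplies and two
     word additions suffice; expand_doubleword_mult generates a sequence of
     that shape when the widening multiply and word add patterns exist.  */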
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (optab_handler (umul_widen_optab, mode)->insn_code
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && optab_handler (smul_widen_optab, mode)->insn_code
	     != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
	    {
	      temp = emit_move_insn (target ? target : product, product);
	      set_unique_reg_note (temp,
				   REG_EQUAL,
				   gen_rtx_fmt_ee (MULT, mode,
						   copy_rtx (op0),
						   copy_rtx (op1)));
	    }
	  return product;
	}
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((optab_handler (binoptab, wider_mode)->insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (class != MODE_INT
		      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
						 GET_MODE_BITSIZE (wider_mode)))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
  /* We don't want to generate new hash table entries from this fake
     optab.  */
  wide_soptab.libcall_gen = NULL;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp || !unsignedp)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
		       unsignedp, methods);
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (unoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (binoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (binoptab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
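/* For example (illustrative only), clz on a QImode value x using a 32-bit
   clz pattern:

	clz8 (x) = clz32 ((unsigned int) x) - (32 - 8)

   which is what the subtraction of the bitsize difference below computes.  */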
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (clz_optab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
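/* E.g. (illustrative, assuming 32-bit words): for a DImode value hi:lo,

	clz64 (x) = hi != 0 ? clz32 (hi) : clz32 (lo) + 32

   which is exactly the compare-and-branch sequence generated below.  */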
static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx hi0_label = gen_label_rtx ();
  rtx after_label = gen_label_rtx ();
  rtx seq, temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       GEN_INT (GET_MODE_BITSIZE (word_mode)),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx x, last;

  if (!CLASS_HAS_WIDER_MODES_P (class))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      size_int (GET_MODE_BITSIZE (wider_mode)
				- GET_MODE_BITSIZE (mode)),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return x;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0)
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
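/* For instance (illustration only), parity of 0xb6 = 0b10110110 is
   popcount (0xb6) & 1 = 5 & 1 = 1, i.e. an odd number of set bits.  */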
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (popcount_optab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_BITSIZE(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.  */
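/* Worked example (illustrative): for a 32-bit x = 20 = 0b10100,
   x & -x isolates the lowest set bit, 0b100; clz32 (0b100) is 29, so
   K - clz = 31 - 29 = 2 = ctz (20).  */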
static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx seq, temp;

  if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 GEN_INT (GET_MODE_BITSIZE (mode) - 1),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
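/* In other words (illustrative only): ffs (x) == ctz (x) + 1 for x != 0 and
   ffs (0) == 0, so when the ctz/clz expansion happens to yield -1 at zero no
   extra fixup is needed; otherwise the compare-and-branch below forces the
   intermediate value to -1 before the final increment.  */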
static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp, seq;

  if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_BITSIZE (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
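/* Concretely (illustrative, assuming IEEE single precision with the sign in
   bit 31): on the integer image i of the float,

	abs (x)  ->  i & 0x7fffffff
	neg (x)  ->  i ^ 0x80000000

   which is the and/xor with the sign-bit mask built below.  */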
static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_const (lo, hi, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_const (lo, hi, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
			   gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (unoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat, temp;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return temp;
	}
      else
	delete_insns_since (last);
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_clz (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      temp = widen_bswap (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_bswap (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (class))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && class == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (class != MODE_INT
		    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					       GET_MODE_BITSIZE (wider_mode)))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
      return target;
    }
  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  temp = expand_absneg_bit (NEG, mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx insns;
      rtx value;
      rtx eq_value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == popcount_optab || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
      if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
	eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
      else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
	eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
      emit_libcall_block (insns, target, value, eq_value);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((optab_handler (unoptab, wider_mode)->insn_code
	       != CODE_FOR_nothing)
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */

	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab)
				    && class == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  */
	      if (unoptab == clz_optab && temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);

	      if (temp)
		{
		  if (class != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (op1);

  return target;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  int icode;
  rtx sign, label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = (int) signbit_optab->handlers [(int) mode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      HOST_WIDE_INT hi, lo;

      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  imode = int_mode_for_mode (mode);
	  if (imode == BLKmode)
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      if (bitpos < HOST_BITS_PER_WIDE_INT)
	{
	  hi = 0;
	  lo = (HOST_WIDE_INT) 1 << bitpos;
	}
      else
	{
	  hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
	  lo = 0;
	}

      sign = gen_reg_rtx (imode);
      sign = expand_binop (imode, and_optab, op1,
			   immed_double_const (lo, hi, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
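/* Sketch of the idea (illustrative, IEEE single precision, sign in bit 31):

	copysign (x, y)  ->  (ix & 0x7fffffff) | (iy & 0x80000000)

   where ix and iy are the integer images of x and y; the mask on op0 is
   skipped when OP0_IS_ABS says its sign bit is already clear.  */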
static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece = expand_binop (imode, and_optab, op0_piece,
					  immed_double_const (~lo, ~hi, imode),
					  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_double_const (lo, hi, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_double_const (lo, hi, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_double_const (~lo, ~hi, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs = false;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
   Set P->must_stay if the currently examined clobber / store has to stay
   in the list of insns that constitute the actual no_conflict block /
   libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Encapsulate the block starting at FIRST and ending with LAST, which is
   logically equivalent to EQUIV, so it gets manipulated as a unit if it
   is possible to do so.  */

static void
maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
{
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
	 encapsulated region would not be in one basic block, i.e. when
	 there is a control_flow_insn_p insn between FIRST and LAST.  */
      bool attach_libcall_retval_notes = true;
      rtx insn, next = NEXT_INSN (last);

      for (insn = first; insn != next; insn = NEXT_INSN (insn))
	if (control_flow_insn_p (insn))
	  {
	    attach_libcall_retval_notes = false;
	    break;
	  }

      if (attach_libcall_retval_notes)
	{
	  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
						 REG_NOTES (first));
	  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
						REG_NOTES (last));
	}
    }
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
	  || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and
     remove these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  add_insn (insn);
	}
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && REG_P (op1))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && REG_P (op0))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (optab_handler (mov_optab, GET_MODE (target))->insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
	set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	    if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	      remove_note (insn, note);
	  }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0)
	    XEXP (note, 0) = constm1_rtx;
	  else
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
						  REG_NOTES (insn));
	}

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (optab_handler (mov_optab, GET_MODE (target))->insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  do
    {
      if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }

      if (purpose == ccp_jump
          && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
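
/* Illustrative sketch (not part of the original source): how a caller can
   consult can_compare_p before committing to a branch strategy.  The mode,
   operands and label are hypothetical.  */
#if 0
static void
example_branch_if_less (rtx op0, rtx op1, rtx label)
{
  /* Only emit a direct compare-and-branch when the target supports a
     signed less-than jump on SImode; otherwise a caller would fall back
     to the widening/libcall path set up by prepare_cmp_insn below.  */
  if (can_compare_p (LT, SImode, ccp_jump))
    emit_cmp_and_jump_insns (op0, op1, LT, NULL_RTX, SImode, 0, label);
}
#endif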
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */
void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
4191 /* If we are inside an appropriately-short loop and we are optimizing,
4192 force expensive constants into a register. */
4193 if (CONSTANT_P (x
) && optimize
4194 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
4195 x
= force_reg (mode
, x
);
4197 if (CONSTANT_P (y
) && optimize
4198 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
4199 y
= force_reg (mode
, y
);
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4208 /* Don't let both operands fail to indicate the mode. */
4209 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
4210 x
= force_reg (mode
, x
);
4212 /* Handle all BLKmode compares. */
4214 if (mode
== BLKmode
)
4216 enum machine_mode cmp_mode
, result_mode
;
4217 enum insn_code cmp_code
;
4222 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
4226 /* Try to use a memory block compare insn - either cmpstr
4227 or cmpmem will do. */
4228 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
4229 cmp_mode
!= VOIDmode
;
4230 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
4232 cmp_code
= cmpmem_optab
[cmp_mode
];
4233 if (cmp_code
== CODE_FOR_nothing
)
4234 cmp_code
= cmpstr_optab
[cmp_mode
];
4235 if (cmp_code
== CODE_FOR_nothing
)
4236 cmp_code
= cmpstrn_optab
[cmp_mode
];
4237 if (cmp_code
== CODE_FOR_nothing
)
4240 /* Must make sure the size fits the insn's mode. */
4241 if ((GET_CODE (size
) == CONST_INT
4242 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
4243 || (GET_MODE_BITSIZE (GET_MODE (size
))
4244 > GET_MODE_BITSIZE (cmp_mode
)))
4247 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
4248 result
= gen_reg_rtx (result_mode
);
4249 size
= convert_to_mode (cmp_mode
, size
, 1);
4250 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
4254 *pmode
= result_mode
;
4258 /* Otherwise call a library function, memcmp. */
4259 libfunc
= memcmp_libfunc
;
4260 length_type
= sizetype
;
4261 result_mode
= TYPE_MODE (integer_type_node
);
4262 cmp_mode
= TYPE_MODE (length_type
);
4263 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
4264 TYPE_UNSIGNED (length_type
));
4266 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
4273 *pmode
= result_mode
;
4277 /* Don't allow operands to the compare to trap, as that can put the
4278 compare and branch in different basic blocks. */
4279 if (flag_non_call_exceptions
)
4282 x
= force_reg (mode
, x
);
4284 y
= force_reg (mode
, y
);
4289 if (can_compare_p (*pcomparison
, mode
, purpose
))
4292 /* Handle a lib call just for the mode we are using. */
4294 libfunc
= optab_libfunc (cmp_optab
, mode
);
4295 if (libfunc
&& !SCALAR_FLOAT_MODE_P (mode
))
4299 /* If we want unsigned, and this mode has a distinct unsigned
4300 comparison routine, use that. */
4303 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
4308 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
4309 targetm
.libgcc_cmp_return_mode (),
4310 2, x
, mode
, y
, mode
);
4312 /* There are two kinds of comparison routines. Biased routines
4313 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4314 of gcc expect that the comparison operation is equivalent
4315 to the modified comparison. For signed comparisons compare the
4316 result against 1 in the biased case, and zero in the unbiased
4317 case. For unsigned comparisons always compare against 1 after
4318 biasing the unbiased result by adding 1. This gives us a way to
4324 if (!TARGET_LIB_INT_CMP_BIASED
)
4327 *px
= plus_constant (result
, 1);
4334 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
4335 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
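
/* Illustrative note (not part of the original source): the two libcall
   comparison conventions handled above, shown as the check a caller ends up
   forming for a signed "x < y".  The helper below is purely hypothetical and
   only makes the convention concrete.  */
#if 0
static rtx
example_interpret_cmp_result (rtx result)
{
  /* Biased comparison routines return 0/1/2 meaning lt/eq/gt, so "x < y"
     becomes "result < 1".  Unbiased routines return -1/0/1, so the same
     test is "result < 0" (or "result + 1 < 1" after the re-biasing that
     prepare_cmp_insn performs for unsigned comparisons).  */
  if (TARGET_LIB_INT_CMP_BIASED)
    return gen_rtx_LT (VOIDmode, result, const1_rtx);
  else
    return gen_rtx_LT (VOIDmode, result, const0_rtx);
}
#endif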
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    {
      if (reload_completed)
        return NULL_RTX;
      x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
        {
          icode = optab_handler (cbranch_optab, wider_mode)->insn_code;

          if (icode != CODE_FOR_nothing
              && insn_data[icode].operand[0].predicate (test, wider_mode))
            {
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
              return;
            }
        }

      /* Handle some compares against zero.  */
      icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      if (!CLASS_HAS_WIDER_MODES_P (class))
        break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, callers are required to pass
         operands in an order conforming to canonical RTL.  We relax this
         for commutative comparisons so callers using EQ don't need to do
         swapping by hand.  */
      gcc_assert (label || (comparison == swap_condition (comparison)));

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}

/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
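
/* Illustrative sketch (not part of the original source): because of the
   canonicalization above, a caller may pass a constant first operand and let
   the condition be swapped internally.  The constant, mode and label are
   hypothetical.  */
#if 0
static void
example_branch_if_ten_less_than (rtx x, rtx label)
{
  /* Emits the equivalent of "if (10 < x) goto label"; the routine swaps the
     operands and uses the swapped condition so the RTL stays canonical.  */
  emit_cmp_and_jump_insns (GEN_INT (10), x, LT, NULL_RTX, SImode, 0, label);
}
#endif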
4488 /* Emit a library call comparison between floating point X and Y.
4489 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4492 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
4493 enum machine_mode
*pmode
, int *punsignedp
)
4495 enum rtx_code comparison
= *pcomparison
;
4496 enum rtx_code swapped
= swap_condition (comparison
);
4497 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4500 enum machine_mode orig_mode
= GET_MODE (x
);
4501 enum machine_mode mode
, cmp_mode
;
4502 rtx value
, target
, insns
, equiv
;
4504 bool reversed_p
= false;
4505 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4507 for (mode
= orig_mode
;
4509 mode
= GET_MODE_WIDER_MODE (mode
))
4511 if ((libfunc
= optab_libfunc (code_to_optab
[comparison
], mode
)))
4514 if ((libfunc
= optab_libfunc (code_to_optab
[swapped
] , mode
)))
4517 tmp
= x
; x
= y
; y
= tmp
;
4518 comparison
= swapped
;
4522 if ((libfunc
= optab_libfunc (code_to_optab
[reversed
], mode
))
4523 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
4525 comparison
= reversed
;
4531 gcc_assert (mode
!= VOIDmode
);
4533 if (mode
!= orig_mode
)
4535 x
= convert_to_mode (mode
, x
, 0);
4536 y
= convert_to_mode (mode
, y
, 0);
  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
4542 if (comparison
== UNORDERED
)
4544 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4545 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4546 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4547 temp
, const_true_rtx
, equiv
);
4551 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4552 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4554 rtx true_rtx
, false_rtx
;
4559 true_rtx
= const0_rtx
;
4560 false_rtx
= const_true_rtx
;
4564 true_rtx
= const_true_rtx
;
4565 false_rtx
= const0_rtx
;
4569 true_rtx
= const1_rtx
;
4570 false_rtx
= const0_rtx
;
4574 true_rtx
= const0_rtx
;
4575 false_rtx
= constm1_rtx
;
4579 true_rtx
= constm1_rtx
;
4580 false_rtx
= const0_rtx
;
4584 true_rtx
= const0_rtx
;
4585 false_rtx
= const1_rtx
;
4591 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4592 equiv
, true_rtx
, false_rtx
);
4597 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4598 cmp_mode
, 2, x
, mode
, y
, mode
);
4599 insns
= get_insns ();
4602 target
= gen_reg_rtx (cmp_mode
);
4603 emit_libcall_block (insns
, target
, value
, equiv
);
4605 if (comparison
== UNORDERED
4606 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4607 comparison
= reversed_p
? EQ
: NE
;
4612 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
4629 #ifdef HAVE_conditional_move
4631 /* Emit a conditional move instruction if the machine supports one for that
4632 condition and machine mode.
4634 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4635 the mode to use should they be constants. If it is VOIDmode, they cannot
4638 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4639 should be stored there. MODE is the mode to use should they be constants.
4640 If it is VOIDmode, they cannot both be constants.
4642 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4643 is not supported. */
4646 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4647 enum machine_mode cmode
, rtx op2
, rtx op3
,
4648 enum machine_mode mode
, int unsignedp
)
4650 rtx tem
, subtarget
, comparison
, insn
;
4651 enum insn_code icode
;
4652 enum rtx_code reversed
;
4654 /* If one operand is constant, make it the second one. Only do this
4655 if the other operand is not constant as well. */
4657 if (swap_commutative_operands_p (op0
, op1
))
4662 code
= swap_condition (code
);
4665 /* get_condition will prefer to generate LT and GT even if the old
4666 comparison was against zero, so undo that canonicalization here since
4667 comparisons against zero are cheaper. */
4668 if (code
== LT
&& op1
== const1_rtx
)
4669 code
= LE
, op1
= const0_rtx
;
4670 else if (code
== GT
&& op1
== constm1_rtx
)
4671 code
= GE
, op1
= const0_rtx
;
4673 if (cmode
== VOIDmode
)
4674 cmode
= GET_MODE (op0
);
4676 if (swap_commutative_operands_p (op2
, op3
)
4677 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4686 if (mode
== VOIDmode
)
4687 mode
= GET_MODE (op2
);
4689 icode
= movcc_gen_code
[mode
];
4691 if (icode
== CODE_FOR_nothing
)
4695 target
= gen_reg_rtx (mode
);
4699 /* If the insn doesn't accept these operands, put them in pseudos. */
4701 if (!insn_data
[icode
].operand
[0].predicate
4702 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4703 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4705 if (!insn_data
[icode
].operand
[2].predicate
4706 (op2
, insn_data
[icode
].operand
[2].mode
))
4707 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4709 if (!insn_data
[icode
].operand
[3].predicate
4710 (op3
, insn_data
[icode
].operand
[3].mode
))
4711 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4713 /* Everything should now be in the suitable form, so emit the compare insn
4714 and then the conditional move. */
4717 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4719 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4720 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4721 return NULL and let the caller figure out how best to deal with this
4723 if (GET_CODE (comparison
) != code
)
4726 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4728 /* If that failed, then give up. */
4734 if (subtarget
!= target
)
4735 convert_move (target
, subtarget
, 0);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
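
/* Illustrative sketch (not part of the original source): expanding
   "r = (a < b) ? c : d" through emit_conditional_move, with the fallback a
   caller must take when the target has no conditional-move pattern for the
   mode.  The operand names and SImode choice are hypothetical.  */
#if 0
static rtx
example_expand_cond_move (rtx a, rtx b, rtx c, rtx d)
{
  rtx target = gen_reg_rtx (SImode);
  rtx res = emit_conditional_move (target, LT, a, b, SImode,
                                   c, d, SImode, 0);

  /* A NULL result means the expansion failed; the caller must then emit an
     explicit compare-and-branch sequence instead.  */
  return res;
}
#endif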
4759 /* Emit a conditional addition instruction if the machine supports one for that
4760 condition and machine mode.
4762 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4763 the mode to use should they be constants. If it is VOIDmode, they cannot
4766 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4767 should be stored there. MODE is the mode to use should they be constants.
4768 If it is VOIDmode, they cannot both be constants.
4770 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4771 is not supported. */
4774 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4775 enum machine_mode cmode
, rtx op2
, rtx op3
,
4776 enum machine_mode mode
, int unsignedp
)
4778 rtx tem
, subtarget
, comparison
, insn
;
4779 enum insn_code icode
;
4780 enum rtx_code reversed
;
4782 /* If one operand is constant, make it the second one. Only do this
4783 if the other operand is not constant as well. */
4785 if (swap_commutative_operands_p (op0
, op1
))
4790 code
= swap_condition (code
);
4793 /* get_condition will prefer to generate LT and GT even if the old
4794 comparison was against zero, so undo that canonicalization here since
4795 comparisons against zero are cheaper. */
4796 if (code
== LT
&& op1
== const1_rtx
)
4797 code
= LE
, op1
= const0_rtx
;
4798 else if (code
== GT
&& op1
== constm1_rtx
)
4799 code
= GE
, op1
= const0_rtx
;
4801 if (cmode
== VOIDmode
)
4802 cmode
= GET_MODE (op0
);
4804 if (swap_commutative_operands_p (op2
, op3
)
4805 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4814 if (mode
== VOIDmode
)
4815 mode
= GET_MODE (op2
);
4817 icode
= optab_handler (addcc_optab
, mode
)->insn_code
;
4819 if (icode
== CODE_FOR_nothing
)
4823 target
= gen_reg_rtx (mode
);
4825 /* If the insn doesn't accept these operands, put them in pseudos. */
4827 if (!insn_data
[icode
].operand
[0].predicate
4828 (target
, insn_data
[icode
].operand
[0].mode
))
4829 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4833 if (!insn_data
[icode
].operand
[2].predicate
4834 (op2
, insn_data
[icode
].operand
[2].mode
))
4835 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4837 if (!insn_data
[icode
].operand
[3].predicate
4838 (op3
, insn_data
[icode
].operand
[3].mode
))
4839 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4841 /* Everything should now be in the suitable form, so emit the compare insn
4842 and then the conditional move. */
4845 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4847 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4848 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4849 return NULL and let the caller figure out how best to deal with this
4851 if (GET_CODE (comparison
) != code
)
4854 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4856 /* If that failed, then give up. */
4862 if (subtarget
!= target
)
4863 convert_move (target
, subtarget
, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
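
/* Illustrative sketch (not part of the original source): how a caller uses
   the have_add2_insn / gen_add2_insn pair above, emitting "x += y" only when
   the target's add pattern accepts the operands as given.  */
#if 0
static void
example_emit_increment (rtx x, rtx y)
{
  if (have_add2_insn (x, y))
    emit_insn (gen_add2_insn (x, y));
}
#endif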
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
}
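
/* Illustrative sketch (not part of the original source): querying can_fix_p
   the way expand_fix does below, including the FTRUNC flag it reports.  The
   mode pair is hypothetical.  */
#if 0
static bool
example_can_fix_sf_to_si (void)
{
  int must_trunc = 0;
  enum insn_code icode = can_fix_p (SImode, SFmode, 0, &must_trunc);

  /* When MUST_TRUNC is set, the caller has to emit an explicit ftrunc
     before the fix insn, exactly as expand_fix arranges.  */
  return icode != CODE_FOR_nothing;
}
#endif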
5086 /* Generate code to convert FROM to floating point
5087 and store in TO. FROM must be fixed point and not VOIDmode.
5088 UNSIGNEDP nonzero means regard FROM as unsigned.
5089 Normally this is done by correcting the final value
5090 if it is negative. */
5093 expand_float (rtx to
, rtx from
, int unsignedp
)
5095 enum insn_code icode
;
5097 enum machine_mode fmode
, imode
;
5098 bool can_do_signed
= false;
5100 /* Crash now, because we won't be able to decide which mode to use. */
5101 gcc_assert (GET_MODE (from
) != VOIDmode
);
5103 /* Look for an insn to do the conversion. Do it in the specified
5104 modes if possible; otherwise convert either input, output or both to
5105 wider mode. If the integer mode is wider than the mode of FROM,
5106 we can do the conversion signed even if the input is unsigned. */
5108 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5109 fmode
= GET_MODE_WIDER_MODE (fmode
))
5110 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
5111 imode
= GET_MODE_WIDER_MODE (imode
))
5113 int doing_unsigned
= unsignedp
;
5115 if (fmode
!= GET_MODE (to
)
5116 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
5119 icode
= can_float_p (fmode
, imode
, unsignedp
);
5120 if (icode
== CODE_FOR_nothing
&& unsignedp
)
5122 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
5123 if (scode
!= CODE_FOR_nothing
)
5124 can_do_signed
= true;
5125 if (imode
!= GET_MODE (from
))
5126 icode
= scode
, doing_unsigned
= 0;
5129 if (icode
!= CODE_FOR_nothing
)
5131 if (imode
!= GET_MODE (from
))
5132 from
= convert_to_mode (imode
, from
, unsignedp
);
5134 if (fmode
!= GET_MODE (to
))
5135 target
= gen_reg_rtx (fmode
);
5137 emit_unop_insn (icode
, target
, from
,
5138 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
5141 convert_move (to
, target
, 0);
5146 /* Unsigned integer, and no way to convert directly. Convert as signed,
5147 then unconditionally adjust the result. */
5148 if (unsignedp
&& can_do_signed
)
5150 rtx label
= gen_label_rtx ();
5152 REAL_VALUE_TYPE offset
;
5154 /* Look for a usable floating mode FMODE wider than the source and at
5155 least as wide as the target. Using FMODE will avoid rounding woes
5156 with unsigned values greater than the signed maximum value. */
5158 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5159 fmode
= GET_MODE_WIDER_MODE (fmode
))
5160 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
5161 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
5164 if (fmode
== VOIDmode
)
5166 /* There is no such mode. Pretend the target is wide enough. */
5167 fmode
= GET_MODE (to
);
5169 /* Avoid double-rounding when TO is narrower than FROM. */
5170 if ((significand_size (fmode
) + 1)
5171 < GET_MODE_BITSIZE (GET_MODE (from
)))
5174 rtx neglabel
= gen_label_rtx ();
5176 /* Don't use TARGET if it isn't a register, is a hard register,
5177 or is the wrong mode. */
5179 || REGNO (target
) < FIRST_PSEUDO_REGISTER
5180 || GET_MODE (target
) != fmode
)
5181 target
= gen_reg_rtx (fmode
);
5183 imode
= GET_MODE (from
);
5184 do_pending_stack_adjust ();
5186 /* Test whether the sign bit is set. */
5187 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
5190 /* The sign bit is not set. Convert as signed. */
5191 expand_float (target
, from
, 0);
5192 emit_jump_insn (gen_jump (label
));
5195 /* The sign bit is set.
5196 Convert to a usable (positive signed) value by shifting right
5197 one bit, while remembering if a nonzero bit was shifted
5198 out; i.e., compute (from & 1) | (from >> 1). */
5200 emit_label (neglabel
);
5201 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
5202 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
5203 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
5205 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5207 expand_float (target
, temp
, 0);
5209 /* Multiply by 2 to undo the shift above. */
5210 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5211 target
, 0, OPTAB_LIB_WIDEN
);
5213 emit_move_insn (target
, temp
);
5215 do_pending_stack_adjust ();
5221 /* If we are about to do some arithmetic to correct for an
5222 unsigned operand, do it in a pseudo-register. */
5224 if (GET_MODE (to
) != fmode
5225 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5226 target
= gen_reg_rtx (fmode
);
5228 /* Convert as signed integer to floating. */
5229 expand_float (target
, from
, 0);
5231 /* If FROM is negative (and therefore TO is negative),
5232 correct its value by 2**bitwidth. */
5234 do_pending_stack_adjust ();
5235 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
5239 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)), fmode
);
5240 temp
= expand_binop (fmode
, add_optab
, target
,
5241 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
5242 target
, 0, OPTAB_LIB_WIDEN
);
5244 emit_move_insn (target
, temp
);
5246 do_pending_stack_adjust ();
5251 /* No hardware instruction available; call a library routine. */
5256 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5258 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
5259 from
= convert_to_mode (SImode
, from
, unsignedp
);
5261 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5262 gcc_assert (libfunc
);
5266 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5267 GET_MODE (to
), 1, from
,
5269 insns
= get_insns ();
5272 emit_libcall_block (insns
, target
, value
,
5273 gen_rtx_FLOAT (GET_MODE (to
), from
));
5278 /* Copy result to requested destination
5279 if we have been computing in a temp location. */
5283 if (GET_MODE (target
) == GET_MODE (to
))
5284 emit_move_insn (to
, target
);
5286 convert_move (to
, target
, 0);
5290 /* Generate code to convert FROM to fixed point and store in TO. FROM
5291 must be floating point. */
5294 expand_fix (rtx to
, rtx from
, int unsignedp
)
5296 enum insn_code icode
;
5298 enum machine_mode fmode
, imode
;
5301 /* We first try to find a pair of modes, one real and one integer, at
5302 least as wide as FROM and TO, respectively, in which we can open-code
5303 this conversion. If the integer mode is wider than the mode of TO,
5304 we can do the conversion either signed or unsigned. */
5306 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5307 fmode
= GET_MODE_WIDER_MODE (fmode
))
5308 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5309 imode
= GET_MODE_WIDER_MODE (imode
))
5311 int doing_unsigned
= unsignedp
;
5313 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5314 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5315 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5317 if (icode
!= CODE_FOR_nothing
)
5319 if (fmode
!= GET_MODE (from
))
5320 from
= convert_to_mode (fmode
, from
, 0);
5324 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5325 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5329 if (imode
!= GET_MODE (to
))
5330 target
= gen_reg_rtx (imode
);
5332 emit_unop_insn (icode
, target
, from
,
5333 doing_unsigned
? UNSIGNED_FIX
: FIX
);
5335 convert_move (to
, target
, unsignedp
);
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is needed for decimal float modes which cannot
     accurately represent one plus the highest signed number of the same
     size, but not for binary modes.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code deals with inputs smaller than 2^63
     and does just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range
     2^63..2^64-1 inclusive (for other inputs overflow happens and the
     result is undefined), so the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
5364 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5365 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5366 fmode
= GET_MODE_WIDER_MODE (fmode
))
5367 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5368 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5369 || GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))))
5372 REAL_VALUE_TYPE offset
;
5373 rtx limit
, lab1
, lab2
, insn
;
5375 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
5376 real_2expN (&offset
, bitsize
- 1, fmode
);
5377 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5378 lab1
= gen_label_rtx ();
5379 lab2
= gen_label_rtx ();
5381 if (fmode
!= GET_MODE (from
))
5382 from
= convert_to_mode (fmode
, from
, 0);
5384 /* See if we need to do the subtraction. */
5385 do_pending_stack_adjust ();
5386 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5389 /* If not, do the signed "fix" and branch around fixup code. */
5390 expand_fix (to
, from
, 0);
5391 emit_jump_insn (gen_jump (lab2
));
5394 /* Otherwise, subtract 2**(N-1), convert to signed number,
5395 then add 2**(N-1). Do the addition using XOR since this
5396 will often generate better code. */
5398 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5399 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5400 expand_fix (to
, target
, 0);
5401 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5403 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5405 to
, 1, OPTAB_LIB_WIDEN
);
5408 emit_move_insn (to
, target
);
5412 if (optab_handler (mov_optab
, GET_MODE (to
))->insn_code
5413 != CODE_FOR_nothing
)
5415 /* Make a place for a REG_NOTE and add it. */
5416 insn
= emit_move_insn (to
, to
);
5417 set_unique_reg_note (insn
,
5419 gen_rtx_fmt_e (UNSIGNED_FIX
,
5427 /* We can't do it with an insn, so use a library call. But first ensure
5428 that the mode of TO is at least as wide as SImode, since those are the
5429 only library calls we know about. */
5431 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5433 target
= gen_reg_rtx (SImode
);
5435 expand_fix (target
, from
, unsignedp
);
5443 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5444 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5445 gcc_assert (libfunc
);
5449 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5450 GET_MODE (to
), 1, from
,
5452 insns
= get_insns ();
5455 emit_libcall_block (insns
, target
, value
,
5456 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5457 GET_MODE (to
), from
));
5462 if (GET_MODE (to
) == GET_MODE (target
))
5463 emit_move_insn (to
, target
);
5465 convert_move (to
, target
, 0);
5469 /* Generate code to convert FROM or TO a fixed-point.
5470 If UINTP is true, either TO or FROM is an unsigned integer.
5471 If SATP is true, we need to saturate the result. */
5474 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5476 enum machine_mode to_mode
= GET_MODE (to
);
5477 enum machine_mode from_mode
= GET_MODE (from
);
5479 enum rtx_code this_code
;
5480 enum insn_code code
;
5484 if (to_mode
== from_mode
)
5486 emit_move_insn (to
, from
);
5492 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5493 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5497 tab
= satp
? satfract_optab
: fract_optab
;
5498 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5500 code
= tab
->handlers
[to_mode
][from_mode
].insn_code
;
5501 if (code
!= CODE_FOR_nothing
)
5503 emit_unop_insn (code
, to
, from
, this_code
);
5507 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5508 gcc_assert (libfunc
);
5511 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5512 1, from
, from_mode
);
5513 insns
= get_insns ();
5516 emit_libcall_block (insns
, to
, value
,
5517 gen_rtx_fmt_e (tab
->code
, to_mode
, from
));
5520 /* Generate code to convert FROM to fixed point and store in TO. FROM
5521 must be floating point, TO must be signed. Use the conversion optab
5522 TAB to do the conversion. */
5525 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5527 enum insn_code icode
;
5529 enum machine_mode fmode
, imode
;
5531 /* We first try to find a pair of modes, one real and one integer, at
5532 least as wide as FROM and TO, respectively, in which we can open-code
5533 this conversion. If the integer mode is wider than the mode of TO,
5534 we can do the conversion either signed or unsigned. */
5536 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5537 fmode
= GET_MODE_WIDER_MODE (fmode
))
5538 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5539 imode
= GET_MODE_WIDER_MODE (imode
))
5541 icode
= convert_optab_handler (tab
, imode
, fmode
)->insn_code
;
5542 if (icode
!= CODE_FOR_nothing
)
5544 if (fmode
!= GET_MODE (from
))
5545 from
= convert_to_mode (fmode
, from
, 0);
5547 if (imode
!= GET_MODE (to
))
5548 target
= gen_reg_rtx (imode
);
5550 emit_unop_insn (icode
, target
, from
, UNKNOWN
);
5552 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (optab_handler (code_to_optab[(int) code], mode)->insn_code
              != CODE_FOR_nothing));
}

/* Set all insn_code fields to CODE_FOR_nothing.  */

static void
init_insn_codes (void)
{
  unsigned int i;

  for (i = 0; i < (unsigned int) OTI_MAX; i++)
    {
      unsigned int j;
      optab op;

      op = &optab_table[i];
      for (j = 0; j < NUM_MACHINE_MODES; j++)
        optab_handler (op, j)->insn_code = CODE_FOR_nothing;
    }

  for (i = 0; i < (unsigned int) COI_MAX; i++)
    {
      unsigned int j, k;
      convert_optab op;

      op = &convert_optab_table[i];
      for (j = 0; j < NUM_MACHINE_MODES; j++)
        for (k = 0; k < NUM_MACHINE_MODES; k++)
          convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
    }
}
/* Initialize OP's code to CODE, and write it into the code_to_optab table.  */

static void
init_optab (optab op, enum rtx_code code)
{
  op->code = code;
  code_to_optab[(int) code] = op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static void
init_optabv (optab op, enum rtx_code code)
{
  op->code = code;
}

/* Conversion optabs never go in the code_to_optab table.  */

static void
init_convert_optab (convert_optab op, enum rtx_code code)
{
  op->code = code;
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;

  set_optab_libfunc (optable, mode,
                     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
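
/* Illustrative note (not part of the original source): with the naming rule
   above, a three-operand addition in SFmode ends up with the libgcc name
   "__addsf3"; decimal float variants additionally get the bid_/dpd_ prefix
   added by gen_fp_libfunc below.  The call here only restates what
   init_optabs arranges for add_optab.  */
#if 0
static void
example_register_add_libfunc (void)
{
  /* Builds "__" + "add" + "sf" + '3' and records it for add_optab.  */
  gen_libfunc (add_optab, "add", '3', SFmode);
}
#endif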
5659 /* Like gen_libfunc, but verify that integer operation is involved. */
5662 gen_int_libfunc (optab optable
, const char *opname
, char suffix
,
5663 enum machine_mode mode
)
5665 int maxsize
= 2 * BITS_PER_WORD
;
5667 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5669 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5670 maxsize
= LONG_LONG_TYPE_SIZE
;
5671 if (GET_MODE_CLASS (mode
) != MODE_INT
5672 || mode
< word_mode
|| GET_MODE_BITSIZE (mode
) > maxsize
)
5674 gen_libfunc (optable
, opname
, suffix
, mode
);
5677 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5680 gen_fp_libfunc (optab optable
, const char *opname
, char suffix
,
5681 enum machine_mode mode
)
5685 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5686 gen_libfunc (optable
, opname
, suffix
, mode
);
5687 if (DECIMAL_FLOAT_MODE_P (mode
))
5689 dec_opname
= alloca (sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5690 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5691 depending on the low level floating format used. */
5692 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5693 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5694 gen_libfunc (optable
, dec_opname
, suffix
, mode
);
5698 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5701 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5702 enum machine_mode mode
)
5704 if (!ALL_FIXED_POINT_MODE_P (mode
))
5706 gen_libfunc (optable
, opname
, suffix
, mode
);
5709 /* Like gen_libfunc, but verify that signed fixed-point operation is
5713 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5714 enum machine_mode mode
)
5716 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5718 gen_libfunc (optable
, opname
, suffix
, mode
);
5721 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5725 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5726 enum machine_mode mode
)
5728 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5730 gen_libfunc (optable
, opname
, suffix
, mode
);
5733 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5736 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5737 enum machine_mode mode
)
5739 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5740 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5741 if (INTEGRAL_MODE_P (mode
))
5742 gen_int_libfunc (optable
, name
, suffix
, mode
);
5745 /* Like gen_libfunc, but verify that FP or INT operation is involved
5746 and add 'v' suffix for integer operation. */
5749 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5750 enum machine_mode mode
)
5752 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5753 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5754 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5756 int len
= strlen (name
);
5757 char *v_name
= alloca (len
+ 2);
5758 strcpy (v_name
, name
);
5760 v_name
[len
+ 1] = 0;
5761 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
5765 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5769 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5770 enum machine_mode mode
)
5772 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5773 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5774 if (INTEGRAL_MODE_P (mode
))
5775 gen_int_libfunc (optable
, name
, suffix
, mode
);
5776 if (ALL_FIXED_POINT_MODE_P (mode
))
5777 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5780 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5784 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5785 enum machine_mode mode
)
5787 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5788 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5789 if (INTEGRAL_MODE_P (mode
))
5790 gen_int_libfunc (optable
, name
, suffix
, mode
);
5791 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5792 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5795 /* Like gen_libfunc, but verify that INT or FIXED operation is
5799 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5800 enum machine_mode mode
)
5802 if (INTEGRAL_MODE_P (mode
))
5803 gen_int_libfunc (optable
, name
, suffix
, mode
);
5804 if (ALL_FIXED_POINT_MODE_P (mode
))
5805 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5808 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5812 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5813 enum machine_mode mode
)
5815 if (INTEGRAL_MODE_P (mode
))
5816 gen_int_libfunc (optable
, name
, suffix
, mode
);
5817 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5818 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5821 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5825 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5826 enum machine_mode mode
)
5828 if (INTEGRAL_MODE_P (mode
))
5829 gen_int_libfunc (optable
, name
, suffix
, mode
);
5830 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5831 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5834 /* Initialize the libfunc fields of an entire group of entries of an
5835 inter-mode-class conversion optab. The string formation rules are
5836 similar to the ones for init_libfuncs, above, but instead of having
5837 a mode name and an operand count these functions have two mode names
5838 and no operand count. */
5841 gen_interclass_conv_libfunc (convert_optab tab
,
5843 enum machine_mode tmode
,
5844 enum machine_mode fmode
)
5846 size_t opname_len
= strlen (opname
);
5847 size_t mname_len
= 0;
5849 const char *fname
, *tname
;
5851 char *libfunc_name
, *suffix
;
5852 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5855 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5856 depends on which underlying decimal floating point format is used. */
5857 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5859 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5861 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5862 nondec_name
[0] = '_';
5863 nondec_name
[1] = '_';
5864 memcpy (&nondec_name
[2], opname
, opname_len
);
5865 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5867 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5870 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5871 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5872 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5874 fname
= GET_MODE_NAME (fmode
);
5875 tname
= GET_MODE_NAME (tmode
);
5877 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5879 libfunc_name
= dec_name
;
5880 suffix
= dec_suffix
;
5884 libfunc_name
= nondec_name
;
5885 suffix
= nondec_suffix
;
5889 for (q
= fname
; *q
; p
++, q
++)
5891 for (q
= tname
; *q
; p
++, q
++)
5896 set_conv_libfunc (tab
, tmode
, fmode
,
5897 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5900 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5901 int->fp conversion. */
5904 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5906 enum machine_mode tmode
,
5907 enum machine_mode fmode
)
5909 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5911 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5913 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5916 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5920 gen_ufloat_conv_libfunc (convert_optab tab
,
5921 const char *opname ATTRIBUTE_UNUSED
,
5922 enum machine_mode tmode
,
5923 enum machine_mode fmode
)
5925 if (DECIMAL_FLOAT_MODE_P (tmode
))
5926 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5928 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
5931 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5932 fp->int conversion. */
5935 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5937 enum machine_mode tmode
,
5938 enum machine_mode fmode
)
5940 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5942 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5944 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5947 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5948 fp->int conversion with no decimal floating point involved. */
5951 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5953 enum machine_mode tmode
,
5954 enum machine_mode fmode
)
5956 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5958 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5960 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are similar to the ones for init_libfuncs,
   above.  */
5968 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5969 enum machine_mode tmode
, enum machine_mode fmode
)
5971 size_t opname_len
= strlen (opname
);
5972 size_t mname_len
= 0;
5974 const char *fname
, *tname
;
5976 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5977 char *libfunc_name
, *suffix
;
5980 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5981 depends on which underlying decimal floating point format is used. */
5982 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5984 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5986 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5987 nondec_name
[0] = '_';
5988 nondec_name
[1] = '_';
5989 memcpy (&nondec_name
[2], opname
, opname_len
);
5990 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5992 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5995 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5996 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5997 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5999 fname
= GET_MODE_NAME (fmode
);
6000 tname
= GET_MODE_NAME (tmode
);
6002 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
6004 libfunc_name
= dec_name
;
6005 suffix
= dec_suffix
;
6009 libfunc_name
= nondec_name
;
6010 suffix
= nondec_suffix
;
6014 for (q
= fname
; *q
; p
++, q
++)
6016 for (q
= tname
; *q
; p
++, q
++)
6022 set_conv_libfunc (tab
, tmode
, fmode
,
6023 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
/* Pick the proper libcall for trunc_optab.  We need to choose whether we do
   truncation or extension and whether it is interclass or intraclass.  */
6030 gen_trunc_conv_libfunc (convert_optab tab
,
6032 enum machine_mode tmode
,
6033 enum machine_mode fmode
)
6035 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6037 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6042 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6043 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6044 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6046 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
6049 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6050 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6051 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6052 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for extend_optab.  We need to choose whether we do
   truncation or extension and whether it is interclass or intraclass.  */
6059 gen_extend_conv_libfunc (convert_optab tab
,
6060 const char *opname ATTRIBUTE_UNUSED
,
6061 enum machine_mode tmode
,
6062 enum machine_mode fmode
)
6064 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6066 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6071 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6072 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6073 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6075 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
6078 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6079 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6080 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6081 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for fract_optab.  We need to choose whether it is
   interclass or intraclass.  */
6088 gen_fract_conv_libfunc (convert_optab tab
,
6090 enum machine_mode tmode
,
6091 enum machine_mode fmode
)
6095 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
6098 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6099 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6101 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6104 /* Pick proper libcall for fractuns_optab. */
6107 gen_fractuns_conv_libfunc (convert_optab tab
,
6109 enum machine_mode tmode
,
6110 enum machine_mode fmode
)
6114 /* One mode must be a fixed-point mode, and the other must be an integer
6116 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
6117 || (ALL_FIXED_POINT_MODE_P (fmode
)
6118 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
6121 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for satfract_optab.  We need to choose whether it
   is interclass or intraclass.  */
6128 gen_satfract_conv_libfunc (convert_optab tab
,
6130 enum machine_mode tmode
,
6131 enum machine_mode fmode
)
6135 /* TMODE must be a fixed-point mode. */
6136 if (!ALL_FIXED_POINT_MODE_P (tmode
))
6139 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6140 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6142 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6145 /* Pick proper libcall for satfractuns_optab. */
6148 gen_satfractuns_conv_libfunc (convert_optab tab
,
6150 enum machine_mode tmode
,
6151 enum machine_mode fmode
)
6155 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6156 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
6159 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo ()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;
  e.optab = (size_t) (optable - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optable - &optab_table[0]);
  (*slot)->mode1 = mode;
  (*slot)->mode2 = VOIDmode;
  (*slot)->libfunc = val;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;
  e.optab = (size_t) (optable - &convert_optab_table[0]);
  e.mode1 = tmode;
  e.mode2 = fmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
  (*slot)->mode1 = tmode;
  (*slot)->mode2 = fmode;
  (*slot)->libfunc = val;
}

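/* Illustrative usage sketch only; the hook and libfunc names below are
   hypothetical and are not defined in this file.  A target normally
   overrides individual entries from its TARGET_INIT_LIBFUNCS hook, which
   runs at the end of init_optabs via targetm.init_libfuncs, e.g.

     static void
     example_init_libfuncs (void)
     {
       set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");
       set_optab_libfunc (smod_optab, SImode, 0);
       set_conv_libfunc (sfloat_optab, DFmode, SImode, "__example_floatsidf");
     }

   Passing 0 as NAME removes the default libcall for that mode, so the
   operation will not be expanded as a library call.  */
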
/* Call this to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;
  enum machine_mode int_mode;
  static bool reinit;

  libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }

#if GCC_VERSION >= 4000
  /* We statically initialize the insn_codes with CODE_FOR_nothing.  */
  if (reinit)
    init_insn_codes ();
#else
  init_insn_codes ();
#endif

  init_optab (add_optab, PLUS);
  init_optabv (addv_optab, PLUS);
  init_optab (sub_optab, MINUS);
  init_optabv (subv_optab, MINUS);
  init_optab (ssadd_optab, SS_PLUS);
  init_optab (usadd_optab, US_PLUS);
  init_optab (sssub_optab, SS_MINUS);
  init_optab (ussub_optab, US_MINUS);
  init_optab (smul_optab, MULT);
  init_optab (ssmul_optab, SS_MULT);
  init_optab (usmul_optab, US_MULT);
  init_optabv (smulv_optab, MULT);
  init_optab (smul_highpart_optab, UNKNOWN);
  init_optab (umul_highpart_optab, UNKNOWN);
  init_optab (smul_widen_optab, UNKNOWN);
  init_optab (umul_widen_optab, UNKNOWN);
  init_optab (usmul_widen_optab, UNKNOWN);
  init_optab (smadd_widen_optab, UNKNOWN);
  init_optab (umadd_widen_optab, UNKNOWN);
  init_optab (ssmadd_widen_optab, UNKNOWN);
  init_optab (usmadd_widen_optab, UNKNOWN);
  init_optab (smsub_widen_optab, UNKNOWN);
  init_optab (umsub_widen_optab, UNKNOWN);
  init_optab (ssmsub_widen_optab, UNKNOWN);
  init_optab (usmsub_widen_optab, UNKNOWN);
  init_optab (sdiv_optab, DIV);
  init_optab (ssdiv_optab, SS_DIV);
  init_optab (usdiv_optab, US_DIV);
  init_optabv (sdivv_optab, DIV);
  init_optab (sdivmod_optab, UNKNOWN);
  init_optab (udiv_optab, UDIV);
  init_optab (udivmod_optab, UNKNOWN);
  init_optab (smod_optab, MOD);
  init_optab (umod_optab, UMOD);
  init_optab (fmod_optab, UNKNOWN);
  init_optab (remainder_optab, UNKNOWN);
  init_optab (ftrunc_optab, UNKNOWN);
  init_optab (and_optab, AND);
  init_optab (ior_optab, IOR);
  init_optab (xor_optab, XOR);
  init_optab (ashl_optab, ASHIFT);
  init_optab (ssashl_optab, SS_ASHIFT);
  init_optab (usashl_optab, US_ASHIFT);
  init_optab (ashr_optab, ASHIFTRT);
  init_optab (lshr_optab, LSHIFTRT);
  init_optab (rotl_optab, ROTATE);
  init_optab (rotr_optab, ROTATERT);
  init_optab (smin_optab, SMIN);
  init_optab (smax_optab, SMAX);
  init_optab (umin_optab, UMIN);
  init_optab (umax_optab, UMAX);
  init_optab (pow_optab, UNKNOWN);
  init_optab (atan2_optab, UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  init_optab (mov_optab, SET);
  init_optab (movstrict_optab, STRICT_LOW_PART);
  init_optab (cmp_optab, COMPARE);

  init_optab (storent_optab, UNKNOWN);

  init_optab (ucmp_optab, UNKNOWN);
  init_optab (tst_optab, UNKNOWN);

  init_optab (eq_optab, EQ);
  init_optab (ne_optab, NE);
  init_optab (gt_optab, GT);
  init_optab (ge_optab, GE);
  init_optab (lt_optab, LT);
  init_optab (le_optab, LE);
  init_optab (unord_optab, UNORDERED);

  init_optab (neg_optab, NEG);
  init_optab (ssneg_optab, SS_NEG);
  init_optab (usneg_optab, US_NEG);
  init_optabv (negv_optab, NEG);
  init_optab (abs_optab, ABS);
  init_optabv (absv_optab, ABS);
  init_optab (addcc_optab, UNKNOWN);
  init_optab (one_cmpl_optab, NOT);
  init_optab (bswap_optab, BSWAP);
  init_optab (ffs_optab, FFS);
  init_optab (clz_optab, CLZ);
  init_optab (ctz_optab, CTZ);
  init_optab (popcount_optab, POPCOUNT);
  init_optab (parity_optab, PARITY);
  init_optab (sqrt_optab, SQRT);
  init_optab (floor_optab, UNKNOWN);
  init_optab (ceil_optab, UNKNOWN);
  init_optab (round_optab, UNKNOWN);
  init_optab (btrunc_optab, UNKNOWN);
  init_optab (nearbyint_optab, UNKNOWN);
  init_optab (rint_optab, UNKNOWN);
  init_optab (sincos_optab, UNKNOWN);
  init_optab (sin_optab, UNKNOWN);
  init_optab (asin_optab, UNKNOWN);
  init_optab (cos_optab, UNKNOWN);
  init_optab (acos_optab, UNKNOWN);
  init_optab (exp_optab, UNKNOWN);
  init_optab (exp10_optab, UNKNOWN);
  init_optab (exp2_optab, UNKNOWN);
  init_optab (expm1_optab, UNKNOWN);
  init_optab (ldexp_optab, UNKNOWN);
  init_optab (scalb_optab, UNKNOWN);
  init_optab (logb_optab, UNKNOWN);
  init_optab (ilogb_optab, UNKNOWN);
  init_optab (log_optab, UNKNOWN);
  init_optab (log10_optab, UNKNOWN);
  init_optab (log2_optab, UNKNOWN);
  init_optab (log1p_optab, UNKNOWN);
  init_optab (tan_optab, UNKNOWN);
  init_optab (atan_optab, UNKNOWN);
  init_optab (copysign_optab, UNKNOWN);
  init_optab (signbit_optab, UNKNOWN);

  init_optab (isinf_optab, UNKNOWN);

  init_optab (strlen_optab, UNKNOWN);
  init_optab (cbranch_optab, UNKNOWN);
  init_optab (cmov_optab, UNKNOWN);
  init_optab (cstore_optab, UNKNOWN);
  init_optab (push_optab, UNKNOWN);

  init_optab (reduc_smax_optab, UNKNOWN);
  init_optab (reduc_umax_optab, UNKNOWN);
  init_optab (reduc_smin_optab, UNKNOWN);
  init_optab (reduc_umin_optab, UNKNOWN);
  init_optab (reduc_splus_optab, UNKNOWN);
  init_optab (reduc_uplus_optab, UNKNOWN);

  init_optab (ssum_widen_optab, UNKNOWN);
  init_optab (usum_widen_optab, UNKNOWN);
  init_optab (sdot_prod_optab, UNKNOWN);
  init_optab (udot_prod_optab, UNKNOWN);

  init_optab (vec_extract_optab, UNKNOWN);
  init_optab (vec_extract_even_optab, UNKNOWN);
  init_optab (vec_extract_odd_optab, UNKNOWN);
  init_optab (vec_interleave_high_optab, UNKNOWN);
  init_optab (vec_interleave_low_optab, UNKNOWN);
  init_optab (vec_set_optab, UNKNOWN);
  init_optab (vec_init_optab, UNKNOWN);
  init_optab (vec_shl_optab, UNKNOWN);
  init_optab (vec_shr_optab, UNKNOWN);
  init_optab (vec_realign_load_optab, UNKNOWN);
  init_optab (movmisalign_optab, UNKNOWN);
  init_optab (vec_widen_umult_hi_optab, UNKNOWN);
  init_optab (vec_widen_umult_lo_optab, UNKNOWN);
  init_optab (vec_widen_smult_hi_optab, UNKNOWN);
  init_optab (vec_widen_smult_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
  init_optab (vec_pack_trunc_optab, UNKNOWN);
  init_optab (vec_pack_usat_optab, UNKNOWN);
  init_optab (vec_pack_ssat_optab, UNKNOWN);
  init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
  init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);

  init_optab (powi_optab, UNKNOWN);

  init_convert_optab (sext_optab, SIGN_EXTEND);
  init_convert_optab (zext_optab, ZERO_EXTEND);
  init_convert_optab (trunc_optab, TRUNCATE);
  init_convert_optab (sfix_optab, FIX);
  init_convert_optab (ufix_optab, UNSIGNED_FIX);
  init_convert_optab (sfixtrunc_optab, UNKNOWN);
  init_convert_optab (ufixtrunc_optab, UNKNOWN);
  init_convert_optab (sfloat_optab, FLOAT);
  init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
  init_convert_optab (lrint_optab, UNKNOWN);
  init_convert_optab (lround_optab, UNKNOWN);
  init_convert_optab (lfloor_optab, UNKNOWN);
  init_convert_optab (lceil_optab, UNKNOWN);

  init_convert_optab (fract_optab, FRACT_CONVERT);
  init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
  init_convert_optab (satfract_optab, SAT_FRACT);
  init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();
  /* Initialize the optabs with the names of the library functions.  */
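  /* Worked example of the naming scheme (illustrative; the exact set of
     modes that get a default name depends on the target and on the
     libcall_gen hook): a default libcall name is built from "__", the
     libcall_basename, the lower-case mode name and the libcall_suffix.
     So add_optab (basename "add", suffix '3', gen_int_fp_fixed_libfunc)
     yields names such as "__addsi3" and "__adddf3", while ffs_optab
     (basename "ffs", suffix '2', gen_int_libfunc) yields "__ffssi2",
     "__ffsdi2", and so on.  The libcall_gen hook decides which mode
     classes (integer, floating point, fixed point) are covered.  */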
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssadd_optab->libcall_basename = "ssadd";
  ssadd_optab->libcall_suffix = '3';
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
  usadd_optab->libcall_basename = "usadd";
  usadd_optab->libcall_suffix = '3';
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  sssub_optab->libcall_basename = "sssub";
  sssub_optab->libcall_suffix = '3';
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
  ussub_optab->libcall_basename = "ussub";
  ussub_optab->libcall_suffix = '3';
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssmul_optab->libcall_basename = "ssmul";
  ssmul_optab->libcall_suffix = '3';
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
  usmul_optab->libcall_basename = "usmul";
  usmul_optab->libcall_suffix = '3';
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  ssdiv_optab->libcall_basename = "ssdiv";
  ssdiv_optab->libcall_suffix = '3';
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  usdiv_optab->libcall_basename = "usdiv";
  usdiv_optab->libcall_suffix = '3';
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
  ssashl_optab->libcall_basename = "ssashl";
  ssashl_optab->libcall_suffix = '3';
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
  usashl_optab->libcall_basename = "usashl";
  usashl_optab->libcall_suffix = '3';
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ssneg_optab->libcall_basename = "ssneg";
  ssneg_optab->libcall_suffix = '2';
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
  usneg_optab->libcall_basename = "usneg";
  usneg_optab->libcall_suffix = '2';
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;

  /* Comparison libcalls for integers MUST come in pairs,
     signed/unsigned.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;

  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;
  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* trunc_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;

  /* Conversions for fixed-point modes and other modes.  */
  fract_optab->libcall_basename = "fract";
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
  satfract_optab->libcall_basename = "satfract";
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
  fractuns_optab->libcall_basename = "fractuns";
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
  satfractuns_optab->libcall_basename = "satfractuns";
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    {
      int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
      set_optab_libfunc (ffs_optab, int_mode, "ffs");
    }

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
  set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();

  reinit = true;
}

/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        rtx l;

        o = &optab_table[i];
        l = optab_libfunc (o, j);
        if (l)
          {
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (l, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          rtx l;

          o = &convert_optab_table[i];
          l = convert_optab_libfunc (o, j, k);
          if (l)
            {
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (l, 0));
            }
        }
}

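/* For reference (illustrative only; the exact entries depend on the
   target): each printed line has the form "<rtx-name>\t<mode>:\t<libfunc>",
   so on a typical 32-bit target the arithmetic table contains lines
   roughly like "plus	SI:	__addsi3", and the conversion table prints
   both modes before the libfunc name.  */
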
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (cmp_optab, mode)->insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}

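/* Example of the emitted sequence (illustrative; the exact RTL is target
   dependent): on a target providing the "conditional_trap" named pattern,
   gen_cond_trap (EQ, x, y, const0_rtx) returns a sequence that first
   compares X and Y using the cmp_optab pattern for their mode and then
   emits a trap_if insn testing EQ with trap code 0; it returns 0 when no
   such pattern or comparison is available.  */
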
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;
    default:
      gcc_unreachable ();
    }
  return code;
}

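/* For example, get_rtx_code (LT_EXPR, false) yields LT while
   get_rtx_code (LT_EXPR, true) yields the unsigned variant LTU; the
   unordered comparisons (UNORDERED_EXPR and friends) have no unsigned
   forms and map directly to their rtx counterparts.  */
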
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
                         EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
                         EXPAND_STACK_PARM);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}

/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in VMODE mode.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}

/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
                                   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them into registers, if required.  */
  rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit the instruction.  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
                              comparison, cc_op0, cc_op1));

  return target;
}

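/* Illustrative example (mode and operand shapes depend on the target's
   vcond/vcondu patterns): for a V4SImode VEC_COND_EXPR <a > b, x, y>,
   this emits the vcond pattern for V4SImode with operands
   (target, x, y, (gt a b), a, b), i.e. operands 1 and 2 are the two value
   vectors and operands 3-5 describe the comparison.  */
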
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
                               rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;
  emit_insn (insn);

  return target;
}

/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}

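/* Usage sketch (illustrative): this is the expander behind builtins such
   as __sync_val_compare_and_swap.  A call like

     expand_val_compare_and_swap (mem, expected, desired, target)

   emits the target's sync_compare_and_swap pattern for GET_MODE (mem) and
   returns an rtx holding the value MEM contained before the operation, or
   NULL_RTX if the target provides no such pattern.  */
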
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
        break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return NULL_RTX;

      /* Ensure that if old_val == mem, we're not comparing
         against an old value.  */
      if (MEM_P (old_val))
        old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
        return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
        {
          enum machine_mode cmode = insn_data[icode].operand[0].mode;
          rtx insn;

          subtarget = target;
          if (!insn_data[icode].operand[0].predicate (target, cmode))
            subtarget = gen_reg_rtx (cmode);

          insn = GEN_FCN (icode) (subtarget);
          if (insn)
            {
              emit_insn (insn);
              if (GET_MODE (target) != GET_MODE (subtarget))
                {
                  convert_move (target, subtarget, 1);
                  subtarget = target;
                }
              return target;
            }
        }
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}

/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        cmp_reg = compare-and-swap(mem, old_reg, new_reg)
        if (cmp_reg != old_reg)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget != NULL_RTX)
        {
          gcc_assert (subtarget == cmp_reg);
          break;
        }

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget == NULL_RTX)
        return false;
      if (subtarget != cmp_reg)
        emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}

/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing || CONST_INT_P (val))
        {
          icode = sync_add_optab[mode];
          if (icode != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
        {
          emit_insn (insn);
          return const0_rtx;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return const0_rtx;
    }

  return NULL_RTX;
}

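/* Usage sketch (illustrative): a statement such as
   __sync_fetch_and_add (p, 1) whose result is unused reaches this function
   roughly as expand_sync_operation (mem, const1_rtx, PLUS), which emits
   the target's sync_add pattern when present.  Note above that subtraction
   of a constant is rewritten as addition of its negation, and that the
   NAND case falls back to a compare-and-swap loop computing (~old) & val
   when no direct pattern exists.  */
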
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
                             bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
          || CONST_INT_P (val))
        {
          old_code = sync_old_add_optab[mode];
          new_code = sync_new_add_optab[mode];
          if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
        {
          icode = old_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
          && (code == PLUS || code == MINUS || code == XOR))
        {
          icode = new_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);

          /* If we need to compensate for using an operation with the
             wrong return value, do so now.  */
          if (compensate)
            {
              if (!after)
                {
                  if (code == PLUS)
                    code = MINUS;
                  else if (code == MINUS)
                    code = PLUS;
                }

              if (code == NOT)
                target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
              target = expand_simple_binop (mode, code, target, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
            }

          return target;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
        emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);
      if (after)
        emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}

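/* Compensation example (illustrative): if the target only provides the
   "old value" form (say sync_old_add) but the caller asked for the value
   after the operation, the code above emits the old-value insn and then
   recomputes target = target + val with ordinary arithmetic; conversely,
   when only the "new value" form exists and the old value is wanted, the
   operation is undone afterwards, which is why only the reversible codes
   PLUS, MINUS and XOR are accepted in that direction.  */
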
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value stored
   in MEM is target defined.  TARGET is an optional place to stick the
   return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);
          return target;
        }
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}

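/* Usage sketch (illustrative): lock primitives such as
   __sync_lock_test_and_set (p, 1) come through here.  When the target
   defines sync_lock_test_and_set for the mode it is used directly;
   otherwise the exchange is synthesized with the compare-and-swap loop
   above.  NULL_RTX is returned if neither path is possible, so the caller
   can fall back to an ordinary call.  */
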
#include "gt-optabs.h"