/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
#if GCC_VERSION >= 4000
__extension__ struct optab optab_table[OTI_MAX]
  = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
      = CODE_FOR_nothing };
#else
/* init_insn_codes will do runtime initialization otherwise.  */
struct optab optab_table[OTI_MAX];
#endif
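/* Note: the "[0 ... N]" range designators above are a GNU C extension,
   which is why this static initialization is only used when the host
   compiler is GCC 4.0 or later.  It simply marks every handler of every
   optab as CODE_FOR_nothing at compile time.  */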
rtx libfunc_table[LTI_MAX];
/* Tables of patterns for converting one mode to another.  */
#if GCC_VERSION >= 4000
__extension__ struct convert_optab convert_optab_table[COI_MAX]
  = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
      [0 ... NUM_MACHINE_MODES - 1].insn_code
      = CODE_FOR_nothing };
#else
/* init_convert_optab will do runtime initialization otherwise.  */
struct convert_optab convert_optab_table[COI_MAX];
#endif
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */
rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */
enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */
enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
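/* The prefix is prepended to the base names of the decimal floating
   point libfuncs, so that the BID and DPD implementations in libgcc can
   coexist; which encoding is used is decided when GCC is configured.  */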
/* Info about libfunc.  We use same hashtable for normal optabs and conversion
   optab.  In the first case mode2 is unused.  */
struct libfunc_entry GTY(())
{
  size_t optab;
  enum machine_mode mode1, mode2;
  rtx libfunc;
};

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
/* Used for attribute_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}
/* Used for optab_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}
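/* hash_libfunc and eq_libfunc are the htab_hash/htab_eq callbacks for
   libfunc_hash above; entries are keyed on the optab index together with
   MODE1 and MODE2 (MODE2 being unused for non-conversion optabs).  */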
/* Return libfunc corresponding operation defined by OPTAB converting
   from MODE2 to MODE1.  Trigger lazy initialization if needed, return NULL
   if no libfunc is available.  */

rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &convert_optab_table[0]);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e,
                                                           NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Return libfunc corresponding operation defined by OPTAB in MODE.
   Trigger lazy initialization if needed, return NULL if no libfunc is
   available.  */

rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */

optab
optab_for_tree_code (enum tree_code code, const_tree type)
{
  bool trapv;

  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                  nops    OP0     OP1     WIDE_OP
   widening-sum                   2       oprnd0  -       oprnd1
   widening-dot-product           3       oprnd0  oprnd1  oprnd2
   widening-mult                  2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)    1       oprnd0  -       -  */
rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_OPERAND_LENGTH (exp);

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
          && xmode1 != VOIDmode)
        xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
        {
          if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
        {
          if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (optab_handler (ternary_optab, mode)->insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) optab_handler (shift_optab, mode)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_normal (vec_oprnd);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_normal (shift_oprnd);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (target == 0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  if (!pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
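/* For example, an arithmetic right shift of a double word by a count known
   to be >= BITS_PER_WORD only has to shift OUTOF_INPUT into INTO_TARGET
   (by SUPERWORD_OP1) and then fill OUTOF_TARGET with copies of
   OUTOF_INPUT's sign bit; that is exactly what this routine emits.  */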
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
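  /* Concretely, for a left shift by OP1 < BITS_PER_WORD the two result
     words are

       INTO_TARGET  = (INTO_INPUT << OP1)
                      | (OUTOF_INPUT >> (BITS_PER_WORD - OP1))
       OUTOF_TARGET = OUTOF_INPUT << OP1

     and symmetrically for right shifts; the ORed-in term is the "carries"
     value computed below.  */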
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
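/* For instance, when SHIFT_MASK == BITS_PER_WORD - 1 (the usual
   SHIFT_COUNT_TRUNCATED case), the superword path can reuse OP1 itself
   as the shift count, because the word-mode shift will truncate it
   modulo BITS_PER_WORD; see the !CONSTANT_P (op1) branch below.  */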
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                                 _______________________
                                [__op0_high_|__op0_low__]
                                 _______________________
        *                       [__op1_high_|__op1_low__]
        _______________________________________________
                                 _______________________
    (1)                         [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

  If both operands are positive then no adjustment is needed.

  If the operands have different signs, for example op0_low < 0 and
  op1_low >= 0, the instruction treats the most significant bit of
  op0_low as a sign bit instead of a bit with significance
  2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
  with 2**BITS_PER_WORD - op0_low, and two's complements the
  result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
  the result.

  Similarly, if both operands are negative, we need to add
  (op0_low + op1_low) * 2**BITS_PER_WORD.

  We use a trick to adjust quickly.  We logically shift op0_low right
  (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
  op0_high (op1_high) before it is used to calculate 2b (2a).  If no
  logical shift exists, we do an arithmetic right shift and subtract
  the 0 or -1.  */
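/* As a concrete illustration, take BITS_PER_WORD == 32 and unsigned
   operands.  Writing OP0 = h0*2^32 + l0 and OP1 = h1*2^32 + l1, the low
   64 bits of the product are

     (l0 * l1) + ((l0 * h1 + h0 * l1) mod 2^32) * 2^32

   so (1) is the only multiply that must produce a full double-word
   result; (2a) and (2b) only contribute their low words, and (3) does
   not contribute at all.  */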
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as an operand to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          rtx x, bool unsignedp)
{
  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  int icode = (int) optab_handler (binoptab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode tmp_mode;
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx temp;
  rtx swap;

  if (target)
    temp = target;
  else
    temp = gen_reg_rtx (mode);

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
      && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (xop0) != VOIDmode
                          ? GET_MODE (xop0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (xop1) != VOIDmode
                          ? GET_MODE (xop1)
                          : mode,
                          xop1, unsignedp);

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
         arguments.  */
      tmp_mode = insn_data[icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        return 0;
    }
  else
    tmp_mode = mode;

  if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
    temp = gen_reg_rtx (tmp_mode);

  pat = GEN_FCN (icode) (temp, xop0, xop1);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return temp;
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }
  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
      && class == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_BITSIZE (mode);

      if (GET_CODE (op1) == CONST_INT)
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (mode, op1);
      else
        newop1 = expand_binop (mode, sub_optab,
                               GEN_INT (bits), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
                          GET_MODE_WIDER_MODE (mode))->insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && ((optab_handler ((unsignedp ? umul_widen_optab
                                     : smul_widen_optab),
                                    GET_MODE_WIDER_MODE (wider_mode))->insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    {
      temp = op1;
      op1 = op0;
      op0 = temp;
    }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
1759 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1760 || binoptab
== ashr_optab
)
1761 && class == MODE_INT
1762 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1763 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1764 && optab_handler (binoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
1765 && optab_handler (ashl_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
1766 && optab_handler (lshr_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
1768 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1769 enum machine_mode op1_mode
;
1771 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1772 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1773 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1775 /* Apply the truncation to constant shifts. */
1776 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1777 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1779 if (op1
== CONST0_RTX (op1_mode
))
1782 /* Make sure that this is a combination that expand_doubleword_shift
1783 can handle. See the comments there for details. */
1784 if (double_shift_mask
== 0
1785 || (shift_mask
== BITS_PER_WORD
- 1
1786 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1788 rtx insns
, equiv_value
;
1789 rtx into_target
, outof_target
;
1790 rtx into_input
, outof_input
;
1791 int left_shift
, outof_word
;
1793 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1794 won't be accurate, so use a new target. */
1795 if (target
== 0 || target
== op0
|| target
== op1
)
1796 target
= gen_reg_rtx (mode
);
1800 /* OUTOF_* is the word we are shifting bits away from, and
1801 INTO_* is the word that we are shifting bits towards, thus
1802 they differ depending on the direction of the shift and
1803 WORDS_BIG_ENDIAN. */
1805 left_shift
= binoptab
== ashl_optab
;
1806 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1808 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1809 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1811 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1812 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1814 if (expand_doubleword_shift (op1_mode
, binoptab
,
1815 outof_input
, into_input
, op1
,
1816 outof_target
, into_target
,
1817 unsignedp
, next_methods
, shift_mask
))
1819 insns
= get_insns ();
1822 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1823 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */
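      /* For example, a double word left rotate by C (0 < C < BITS_PER_WORD)
         computes

           high result = (high input << C) | (low input >> (BITS_PER_WORD - C))
           low result  = (low input << C)  | (high input >> (BITS_PER_WORD - C))

         which is exactly the pair of IORed single-word shifts built below.  */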
      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = target;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab, op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT), x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in, NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT), newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                }

              emit_move_insn (target_piece, newx);
            }
          else if (x != target_piece)
            emit_move_insn (target_piece, x);

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp, REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
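
  /* Illustrative note (added here, not taken from the original sources):
     the loop above is schoolbook arithmetic on word-sized "digits".
     E.g. adding two 2-word values (hi0,lo0) + (hi1,lo1) computes
     lo = lo0 + lo1, a carry of (lo < lo0), and hi = hi0 + hi1 + carry,
     which is exactly what the emit_store_flag_force / carry_in
     propagation above generates.  */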
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (optab_handler (umul_widen_optab, mode)->insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && optab_handler (smul_widen_optab, mode)->insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp, REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
        {
          op1_mode = targetm.libgcc_shift_count_mode ();
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (op1_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((optab_handler (binoptab, wider_mode)->insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && optab_libfunc (binoptab, wider_mode)))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT
                      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                                 GET_MODE_BITSIZE (wider_mode)))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
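
/* Usage sketch (added for illustration; the variables A and B below are
   hypothetical and not part of this file): a caller that wants A + B in
   MODE and accepts any fallback strategy would typically write

     rtx sum = expand_binop (mode, add_optab, a, b, NULL_RTX, 1, OPTAB_LIB_WIDEN);

   and must use the returned rtx, which may or may not be the TARGET it
   passed in.  */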
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
  /* We don't want to generate new hash table entries from this fake
     optab.  */
  wide_soptab.libcall_gen = NULL;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (unoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }

      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode)->insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      int icode = (int) optab_handler (binoptab, mode)->insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0) : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1) : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }

      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (binoptab, wider_mode)->insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2, op0, mode, op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (clz_optab, wider_mode)->insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx hi0_label = gen_label_rtx ();
  rtx after_label = gen_label_rtx ();
  rtx seq, temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  temp = expand_binop (word_mode, add_optab, temp,
                       GEN_INT (GET_MODE_BITSIZE (word_mode)),
                       result, true, OPTAB_DIRECT);
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;
}
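
/* Illustrative example (added here, not taken from the original sources):
   on a 32-bit word target, the sequence above computes clz of a 64-bit
   value X as clz (hi) when hi != 0, and 32 + clz (lo) otherwise.  */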
/* Try calculating bswap as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx x, last;

  if (!CLASS_HAS_WIDER_MODES_P (class))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      size_int (GET_MODE_BITSIZE (wider_mode)
                                - GET_MODE_BITSIZE (mode)),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */
static rtx
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0)
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
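
/* Illustrative example (added here, not taken from the original sources):
   on a 32-bit word target, bswap of the 64-bit value 0x0011223344556677
   is obtained by bswapping each 32-bit half and swapping the halves,
   giving 0x7766554433221100, which is what the two subword moves above
   emit.  */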
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode)->insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
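
/* Illustrative example (added here, not taken from the original sources):
   parity (0b1011) = popcount (0b1011) & 1 = 3 & 1 = 1.  */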
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_BITSIZE(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.  */
static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx seq, temp;

  if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         GEN_INT (GET_MODE_BITSIZE (mode) - 1),
                         temp, target,
                         true, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();
  if (temp == 0)
    return 0;

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
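
/* Illustrative example (added here, not taken from the original sources):
   in a 32-bit mode, for x = 0b10100 we have x & -x = 0b100,
   clz (0b100) = 29, and (32 - 1) - 29 = 2 = ctz (x).  */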
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp, seq;

  if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_BITSIZE (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
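
/* Illustrative example (added here, not taken from the original sources):
   ffs (0b10100) = ctz (0b10100) + 1 = 3, while ffs (0) = 0, which is why
   the code above forces the pre-increment value to -1 when the operand
   is zero.  */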
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */
static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
2928 /* Expand a floating point absolute value or negation operation via a
2929 logical operation on the sign bit. */
2932 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2933 rtx op0
, rtx target
)
2935 const struct real_format
*fmt
;
2936 int bitpos
, word
, nwords
, i
;
2937 enum machine_mode imode
;
2938 HOST_WIDE_INT hi
, lo
;
2941 /* The format has to have a simple sign bit. */
2942 fmt
= REAL_MODE_FORMAT (mode
);
2946 bitpos
= fmt
->signbit_rw
;
2950 /* Don't create negative zeros if the format doesn't support them. */
2951 if (code
== NEG
&& !fmt
->has_signed_zero
)
2954 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2956 imode
= int_mode_for_mode (mode
);
2957 if (imode
== BLKmode
)
2966 if (FLOAT_WORDS_BIG_ENDIAN
)
2967 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2969 word
= bitpos
/ BITS_PER_WORD
;
2970 bitpos
= bitpos
% BITS_PER_WORD
;
2971 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2974 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2977 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2981 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2987 if (target
== 0 || target
== op0
)
2988 target
= gen_reg_rtx (mode
);
2994 for (i
= 0; i
< nwords
; ++i
)
2996 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2997 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3001 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3003 immed_double_const (lo
, hi
, imode
),
3004 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3005 if (temp
!= targ_piece
)
3006 emit_move_insn (targ_piece
, temp
);
3009 emit_move_insn (targ_piece
, op0_piece
);
3012 insns
= get_insns ();
3015 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
3016 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
3020 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3021 gen_lowpart (imode
, op0
),
3022 immed_double_const (lo
, hi
, imode
),
3023 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3024 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3026 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
3027 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
3033 /* As expand_unop, but will fail rather than attempt the operation in a
3034 different mode or with a libcall. */
3036 expand_unop_direct (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3039 if (optab_handler (unoptab
, mode
)->insn_code
!= CODE_FOR_nothing
)
3041 int icode
= (int) optab_handler (unoptab
, mode
)->insn_code
;
3042 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3044 rtx last
= get_last_insn ();
3050 temp
= gen_reg_rtx (mode
);
3052 if (GET_MODE (xop0
) != VOIDmode
3053 && GET_MODE (xop0
) != mode0
)
3054 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
3056 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3058 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
3059 xop0
= copy_to_mode_reg (mode0
, xop0
);
3061 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
3062 temp
= gen_reg_rtx (mode
);
3064 pat
= GEN_FCN (icode
) (temp
, xop0
);
3067 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3068 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
3070 delete_insns_since (last
);
3071 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
3079 delete_insns_since (last
);
3084 /* Generate code to perform an operation specified by UNOPTAB
3085 on operand OP0, with result having machine-mode MODE.
3087 UNSIGNEDP is for the case where we have to widen the operands
3088 to perform the operation. It says to use zero-extension.
3090 If TARGET is nonzero, the value
3091 is generated there, if it is convenient to do so.
3092 In all cases an rtx is returned for the locus of the value;
3093 this may or may not be TARGET. */
3096 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3099 enum mode_class
class = GET_MODE_CLASS (mode
);
3100 enum machine_mode wider_mode
;
3104 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3108 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3110 /* Widening (or narrowing) clz needs special treatment. */
3111 if (unoptab
== clz_optab
)
3113 temp
= widen_clz (mode
, op0
, target
);
3117 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3118 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3120 temp
= expand_doubleword_clz (mode
, op0
, target
);
3128 /* Widening (or narrowing) bswap needs special treatment. */
3129 if (unoptab
== bswap_optab
)
3131 temp
= widen_bswap (mode
, op0
, target
);
3135 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3136 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3138 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3146 if (CLASS_HAS_WIDER_MODES_P (class))
3147 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3148 wider_mode
!= VOIDmode
;
3149 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3151 if (optab_handler (unoptab
, wider_mode
)->insn_code
!= CODE_FOR_nothing
)
3154 rtx last
= get_last_insn ();
3156 /* For certain operations, we need not actually extend
3157 the narrow operand, as long as we will truncate the
3158 results to the same narrowness. */
3160 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3161 (unoptab
== neg_optab
3162 || unoptab
== one_cmpl_optab
)
3163 && class == MODE_INT
);
3165 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3170 if (class != MODE_INT
3171 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
3172 GET_MODE_BITSIZE (wider_mode
)))
3175 target
= gen_reg_rtx (mode
);
3176 convert_move (target
, temp
, 0);
3180 return gen_lowpart (mode
, temp
);
3183 delete_insns_since (last
);
3187 /* These can be done a word at a time. */
3188 if (unoptab
== one_cmpl_optab
3189 && class == MODE_INT
3190 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3191 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3196 if (target
== 0 || target
== op0
)
3197 target
= gen_reg_rtx (mode
);
3201 /* Do the actual arithmetic. */
3202 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
3204 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
3205 rtx x
= expand_unop (word_mode
, unoptab
,
3206 operand_subword_force (op0
, i
, mode
),
3207 target_piece
, unsignedp
);
3209 if (target_piece
!= x
)
3210 emit_move_insn (target_piece
, x
);
3213 insns
= get_insns ();
3216 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
3217 gen_rtx_fmt_e (unoptab
->code
, mode
,
3222 if (unoptab
->code
== NEG
)
3224 /* Try negating floating point values by flipping the sign bit. */
3225 if (SCALAR_FLOAT_MODE_P (mode
))
3227 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
3232 /* If there is no negation pattern, and we have no negative zero,
3233 try subtracting from zero. */
3234 if (!HONOR_SIGNED_ZEROS (mode
))
3236 temp
= expand_binop (mode
, (unoptab
== negv_optab
3237 ? subv_optab
: sub_optab
),
3238 CONST0_RTX (mode
), op0
, target
,
3239 unsignedp
, OPTAB_DIRECT
);
3245 /* Try calculating parity (x) as popcount (x) % 2. */
3246 if (unoptab
== parity_optab
)
3248 temp
= expand_parity (mode
, op0
, target
);
3253 /* Try implementing ffs (x) in terms of clz (x). */
3254 if (unoptab
== ffs_optab
)
3256 temp
= expand_ffs (mode
, op0
, target
);
3261 /* Try implementing ctz (x) in terms of clz (x). */
3262 if (unoptab
== ctz_optab
)
3264 temp
= expand_ctz (mode
, op0
, target
);
3270 /* Now try a library call in this mode. */
3271 libfunc
= optab_libfunc (unoptab
, mode
);
3276 enum machine_mode outmode
= mode
;
3278 /* All of these functions return small values. Thus we choose to
3279 have them return something that isn't a double-word. */
3280 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3281 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
3283 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
3287 /* Pass 1 for NO_QUEUE so we don't lose any increments
3288 if the libcall is cse'd or moved. */
3289 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3291 insns
= get_insns ();
3294 target
= gen_reg_rtx (outmode
);
3295 emit_libcall_block (insns
, target
, value
,
3296 gen_rtx_fmt_e (unoptab
->code
, outmode
, op0
));
3301 /* It can't be done in this mode. Can we do it in a wider mode? */
3303 if (CLASS_HAS_WIDER_MODES_P (class))
3305 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3306 wider_mode
!= VOIDmode
;
3307 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3309 if ((optab_handler (unoptab
, wider_mode
)->insn_code
3310 != CODE_FOR_nothing
)
3311 || optab_libfunc (unoptab
, wider_mode
))
3314 rtx last
= get_last_insn ();
3316 /* For certain operations, we need not actually extend
3317 the narrow operand, as long as we will truncate the
3318 results to the same narrowness. */
3320 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3321 (unoptab
== neg_optab
3322 || unoptab
== one_cmpl_optab
)
3323 && class == MODE_INT
);
3325 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3328 /* If we are generating clz using wider mode, adjust the
3330 if (unoptab
== clz_optab
&& temp
!= 0)
3331 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
3332 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
3333 - GET_MODE_BITSIZE (mode
)),
3334 target
, true, OPTAB_DIRECT
);
3338 if (class != MODE_INT
)
3341 target
= gen_reg_rtx (mode
);
3342 convert_move (target
, temp
, 0);
3346 return gen_lowpart (mode
, temp
);
3349 delete_insns_since (last
);
3354 /* One final attempt at implementing negation via subtraction,
3355 this time allowing widening of the operand. */
3356 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3359 temp
= expand_binop (mode
,
3360 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3361 CONST0_RTX (mode
), op0
,
3362 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3370 /* Emit code to compute the absolute value of OP0, with result to
3371 TARGET if convenient. (TARGET may be 0.) The return value says
3372 where the result actually is to be found.
3374 MODE is the mode of the operand; the mode of the result is
3375 different but can be deduced from MODE.
3380 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
3381 int result_unsignedp
)
3386 result_unsignedp
= 1;
3388 /* First try to do it with a special abs instruction. */
3389 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3394 /* For floating point modes, try clearing the sign bit. */
3395 if (SCALAR_FLOAT_MODE_P (mode
))
3397 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3402 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3403 if (optab_handler (smax_optab
, mode
)->insn_code
!= CODE_FOR_nothing
3404 && !HONOR_SIGNED_ZEROS (mode
))
3406 rtx last
= get_last_insn ();
3408 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
3410 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3416 delete_insns_since (last
);
3419 /* If this machine has expensive jumps, we can do integer absolute
3420 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3421 where W is the width of MODE. */
3423 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
3425 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3426 size_int (GET_MODE_BITSIZE (mode
) - 1),
3429 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3432 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3433 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3443 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3444 int result_unsignedp
, int safe
)
3449 result_unsignedp
= 1;
3451 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3455 /* If that does not win, use conditional jump and negate. */
3457 /* It is safe to use the target if it is the same
3458 as the source if this is also a pseudo register */
3459 if (op0
== target
&& REG_P (op0
)
3460 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3463 op1
= gen_label_rtx ();
3464 if (target
== 0 || ! safe
3465 || GET_MODE (target
) != mode
3466 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3468 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3469 target
= gen_reg_rtx (mode
);
3471 emit_move_insn (target
, op0
);
3474 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3475 NULL_RTX
, NULL_RTX
, op1
);
3477 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3480 emit_move_insn (target
, op0
);
3486 /* A subroutine of expand_copysign, perform the copysign operation using the
3487 abs and neg primitives advertised to exist on the target. The assumption
3488 is that we have a split register file, and leaving op0 in fp registers,
3489 and not playing with subregs so much, will help the register allocator. */
3492 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3493 int bitpos
, bool op0_is_abs
)
3495 enum machine_mode imode
;
3502 /* Check if the back end provides an insn that handles signbit for the
3504 icode
= (int) signbit_optab
->handlers
[(int) mode
].insn_code
;
3505 if (icode
!= CODE_FOR_nothing
)
3507 imode
= insn_data
[icode
].operand
[0].mode
;
3508 sign
= gen_reg_rtx (imode
);
3509 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3513 HOST_WIDE_INT hi
, lo
;
3515 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3517 imode
= int_mode_for_mode (mode
);
3518 if (imode
== BLKmode
)
3520 op1
= gen_lowpart (imode
, op1
);
3527 if (FLOAT_WORDS_BIG_ENDIAN
)
3528 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3530 word
= bitpos
/ BITS_PER_WORD
;
3531 bitpos
= bitpos
% BITS_PER_WORD
;
3532 op1
= operand_subword_force (op1
, word
, mode
);
3535 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3538 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3542 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3546 sign
= gen_reg_rtx (imode
);
3547 sign
= expand_binop (imode
, and_optab
, op1
,
3548 immed_double_const (lo
, hi
, imode
),
3549 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3554 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3561 if (target
== NULL_RTX
)
3562 target
= copy_to_reg (op0
);
3564 emit_move_insn (target
, op0
);
3567 label
= gen_label_rtx ();
3568 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3570 if (GET_CODE (op0
) == CONST_DOUBLE
)
3571 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3573 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3575 emit_move_insn (target
, op0
);
3583 /* A subroutine of expand_copysign, perform the entire copysign operation
3584 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3585 is true if op0 is known to have its sign bit clear. */
3588 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3589 int bitpos
, bool op0_is_abs
)
3591 enum machine_mode imode
;
3592 HOST_WIDE_INT hi
, lo
;
3593 int word
, nwords
, i
;
3596 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3598 imode
= int_mode_for_mode (mode
);
3599 if (imode
== BLKmode
)
3608 if (FLOAT_WORDS_BIG_ENDIAN
)
3609 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3611 word
= bitpos
/ BITS_PER_WORD
;
3612 bitpos
= bitpos
% BITS_PER_WORD
;
3613 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3616 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3619 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3623 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3627 if (target
== 0 || target
== op0
|| target
== op1
)
3628 target
= gen_reg_rtx (mode
);
3634 for (i
= 0; i
< nwords
; ++i
)
3636 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3637 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3642 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
3643 immed_double_const (~lo
, ~hi
, imode
),
3644 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3646 op1
= expand_binop (imode
, and_optab
,
3647 operand_subword_force (op1
, i
, mode
),
3648 immed_double_const (lo
, hi
, imode
),
3649 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3651 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3652 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3653 if (temp
!= targ_piece
)
3654 emit_move_insn (targ_piece
, temp
);
3657 emit_move_insn (targ_piece
, op0_piece
);
3660 insns
= get_insns ();
3663 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
3667 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3668 immed_double_const (lo
, hi
, imode
),
3669 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3671 op0
= gen_lowpart (imode
, op0
);
3673 op0
= expand_binop (imode
, and_optab
, op0
,
3674 immed_double_const (~lo
, ~hi
, imode
),
3675 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3677 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3678 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3679 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3685 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3686 scalar floating point mode. Return NULL if we do not know how to
3687 expand the operation inline. */
3690 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3692 enum machine_mode mode
= GET_MODE (op0
);
3693 const struct real_format
*fmt
;
3697 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3698 gcc_assert (GET_MODE (op1
) == mode
);
3700 /* First try to do it with a special instruction. */
3701 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3702 target
, 0, OPTAB_DIRECT
);
3706 fmt
= REAL_MODE_FORMAT (mode
);
3707 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3711 if (GET_CODE (op0
) == CONST_DOUBLE
)
3713 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3714 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3718 if (fmt
->signbit_ro
>= 0
3719 && (GET_CODE (op0
) == CONST_DOUBLE
3720 || (optab_handler (neg_optab
, mode
)->insn_code
!= CODE_FOR_nothing
3721 && optab_handler (abs_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)))
3723 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3724 fmt
->signbit_ro
, op0_is_abs
);
3729 if (fmt
->signbit_rw
< 0)
3731 return expand_copysign_bit (mode
, op0
, op1
, target
,
3732 fmt
->signbit_rw
, op0_is_abs
);
3735 /* Generate an instruction whose insn-code is INSN_CODE,
3736 with two operands: an output TARGET and an input OP0.
3737 TARGET *must* be nonzero, and the output is always stored there.
3738 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3739 the value that is stored into TARGET. */
3742 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
3745 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3750 /* Now, if insn does not accept our operands, put them into pseudos. */
3752 if (!insn_data
[icode
].operand
[1].predicate (op0
, mode0
))
3753 op0
= copy_to_mode_reg (mode0
, op0
);
3755 if (!insn_data
[icode
].operand
[0].predicate (temp
, GET_MODE (temp
)))
3756 temp
= gen_reg_rtx (GET_MODE (temp
));
3758 pat
= GEN_FCN (icode
) (temp
, op0
);
3760 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3761 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3766 emit_move_insn (target
, temp
);
3769 struct no_conflict_data
3771 rtx target
, first
, insn
;
3775 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3776 Set P->must_stay if the currently examined clobber / store has to stay
3777 in the list of insns that constitute the actual no_conflict block /
3780 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3782 struct no_conflict_data
*p
= p0
;
3784 /* If this inns directly contributes to setting the target, it must stay. */
3785 if (reg_overlap_mentioned_p (p
->target
, dest
))
3786 p
->must_stay
= true;
3787 /* If we haven't committed to keeping any other insns in the list yet,
3788 there is nothing more to check. */
3789 else if (p
->insn
== p
->first
)
3791 /* If this insn sets / clobbers a register that feeds one of the insns
3792 already in the list, this insn has to stay too. */
3793 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3794 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3795 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3796 /* Likewise if this insn depends on a register set by a previous
3797 insn in the list, or if it sets a result (presumably a hard
3798 register) that is set or clobbered by a previous insn.
3799 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3800 SET_DEST perform the former check on the address, and the latter
3801 check on the MEM. */
3802 || (GET_CODE (set
) == SET
3803 && (modified_in_p (SET_SRC (set
), p
->first
)
3804 || modified_in_p (SET_DEST (set
), p
->first
)
3805 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3806 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3807 p
->must_stay
= true;
3810 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3811 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3812 is possible to do so. */
3815 maybe_encapsulate_block (rtx first
, rtx last
, rtx equiv
)
3817 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3819 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3820 encapsulated region would not be in one basic block, i.e. when
3821 there is a control_flow_insn_p insn between FIRST and LAST. */
3822 bool attach_libcall_retval_notes
= true;
3823 rtx insn
, next
= NEXT_INSN (last
);
3825 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3826 if (control_flow_insn_p (insn
))
3828 attach_libcall_retval_notes
= false;
3832 if (attach_libcall_retval_notes
)
3834 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3836 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3842 /* Emit code to perform a series of operations on a multi-word quantity, one
3845 Such a block is preceded by a CLOBBER of the output, consists of multiple
3846 insns, each setting one word of the output, and followed by a SET copying
3847 the output to itself.
3849 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3850 note indicating that it doesn't conflict with the (also multi-word)
3851 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3854 INSNS is a block of code generated to perform the operation, not including
3855 the CLOBBER and final copy. All insns that compute intermediate values
3856 are first emitted, followed by the block as described above.
3858 TARGET, OP0, and OP1 are the output and inputs of the operations,
3859 respectively. OP1 may be zero for a unary operation.
3861 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3864 If TARGET is not a register, INSNS is simply emitted with no special
3865 processing. Likewise if anything in INSNS is not an INSN or if
3866 there is a libcall block inside INSNS.
3868 The final insn emitted is returned. */
3871 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3873 rtx prev
, next
, first
, last
, insn
;
3875 if (!REG_P (target
) || reload_in_progress
)
3876 return emit_insn (insns
);
3878 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3879 if (!NONJUMP_INSN_P (insn
)
3880 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3881 return emit_insn (insns
);
3883 /* First emit all insns that do not store into words of the output and remove
3884 these from the list. */
3885 for (insn
= insns
; insn
; insn
= next
)
3888 struct no_conflict_data data
;
3890 next
= NEXT_INSN (insn
);
3892 /* Some ports (cris) create a libcall regions at their own. We must
3893 avoid any potential nesting of LIBCALLs. */
3894 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3895 remove_note (insn
, note
);
3896 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3897 remove_note (insn
, note
);
3899 data
.target
= target
;
3903 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3904 if (! data
.must_stay
)
3906 if (PREV_INSN (insn
))
3907 NEXT_INSN (PREV_INSN (insn
)) = next
;
3912 PREV_INSN (next
) = PREV_INSN (insn
);
3918 prev
= get_last_insn ();
3920 /* Now write the CLOBBER of the output, followed by the setting of each
3921 of the words, followed by the final copy. */
3922 if (target
!= op0
&& target
!= op1
)
3923 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3925 for (insn
= insns
; insn
; insn
= next
)
3927 next
= NEXT_INSN (insn
);
3930 if (op1
&& REG_P (op1
))
3931 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3934 if (op0
&& REG_P (op0
))
3935 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3939 if (optab_handler (mov_optab
, GET_MODE (target
))->insn_code
3940 != CODE_FOR_nothing
)
3942 last
= emit_move_insn (target
, target
);
3944 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3948 last
= get_last_insn ();
3950 /* Remove any existing REG_EQUAL note from "last", or else it will
3951 be mistaken for a note referring to the full contents of the
3952 alleged libcall value when found together with the REG_RETVAL
3953 note added below. An existing note can come from an insn
3954 expansion at "last". */
3955 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3959 first
= get_insns ();
3961 first
= NEXT_INSN (prev
);
3963 maybe_encapsulate_block (first
, last
, equiv
);
3968 /* Emit code to make a call to a constant function or a library call.
3970 INSNS is a list containing all insns emitted in the call.
3971 These insns leave the result in RESULT. Our block is to copy RESULT
3972 to TARGET, which is logically equivalent to EQUIV.
3974 We first emit any insns that set a pseudo on the assumption that these are
3975 loading constants into registers; doing so allows them to be safely cse'ed
3976 between blocks. Then we emit all the other insns in the block, followed by
3977 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3978 note with an operand of EQUIV.
3980 Moving assignments to pseudos outside of the block is done to improve
3981 the generated code, but is not required to generate correct code,
3982 hence being unable to move an assignment is not grounds for not making
3983 a libcall block. There are two reasons why it is safe to leave these
3984 insns inside the block: First, we know that these pseudos cannot be
3985 used in generated RTL outside the block since they are created for
3986 temporary purposes within the block. Second, CSE will not record the
3987 values of anything set inside a libcall block, so we know they must
3988 be dead at the end of the block.
3990 Except for the first group of insns (the ones setting pseudos), the
3991 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3993 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3995 rtx final_dest
= target
;
3996 rtx prev
, next
, first
, last
, insn
;
3998 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3999 into a MEM later. Protect the libcall block from this change. */
4000 if (! REG_P (target
) || REG_USERVAR_P (target
))
4001 target
= gen_reg_rtx (GET_MODE (target
));
4003 /* If we're using non-call exceptions, a libcall corresponding to an
4004 operation that may trap may also trap. */
4005 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
4007 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4010 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
4012 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
4013 remove_note (insn
, note
);
4017 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4018 reg note to indicate that this call cannot throw or execute a nonlocal
4019 goto (unless there is already a REG_EH_REGION note, in which case
4021 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4024 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
4027 XEXP (note
, 0) = constm1_rtx
;
4029 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
4033 /* First emit all insns that set pseudos. Remove them from the list as
4034 we go. Avoid insns that set pseudos which were referenced in previous
4035 insns. These can be generated by move_by_pieces, for example,
4036 to update an address. Similarly, avoid insns that reference things
4037 set in previous insns. */
4039 for (insn
= insns
; insn
; insn
= next
)
4041 rtx set
= single_set (insn
);
4044 /* Some ports (cris) create a libcall regions at their own. We must
4045 avoid any potential nesting of LIBCALLs. */
4046 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
4047 remove_note (insn
, note
);
4048 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
4049 remove_note (insn
, note
);
4051 next
= NEXT_INSN (insn
);
4053 if (set
!= 0 && REG_P (SET_DEST (set
))
4054 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
4056 struct no_conflict_data data
;
4058 data
.target
= const0_rtx
;
4062 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
4063 if (! data
.must_stay
)
4065 if (PREV_INSN (insn
))
4066 NEXT_INSN (PREV_INSN (insn
)) = next
;
4071 PREV_INSN (next
) = PREV_INSN (insn
);
4077 /* Some ports use a loop to copy large arguments onto the stack.
4078 Don't move anything outside such a loop. */
4083 prev
= get_last_insn ();
4085 /* Write the remaining insns followed by the final copy. */
4087 for (insn
= insns
; insn
; insn
= next
)
4089 next
= NEXT_INSN (insn
);
4094 last
= emit_move_insn (target
, result
);
4095 if (optab_handler (mov_optab
, GET_MODE (target
))->insn_code
4096 != CODE_FOR_nothing
)
4097 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
4100 /* Remove any existing REG_EQUAL note from "last", or else it will
4101 be mistaken for a note referring to the full contents of the
4102 libcall value when found together with the REG_RETVAL note added
4103 below. An existing note can come from an insn expansion at
4105 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
4108 if (final_dest
!= target
)
4109 emit_move_insn (final_dest
, target
);
4112 first
= get_insns ();
4114 first
= NEXT_INSN (prev
);
4116 maybe_encapsulate_block (first
, last
, equiv
);
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}

      if (purpose == ccp_jump
	  && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;

      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;

      if (purpose == ccp_store_flag
	  && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
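/* Added note: a caller that wants to branch on, say, a DImode equality test
   would normally ask can_compare_p (EQ, DImode, ccp_jump) first; the loop
   above also accepts a comparison that only becomes available after widening
   to a larger mode of the same class.  If it fails, the expanders below fall
   back to word-by-word or library-call sequences.  */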
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  rtx libfunc;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Make sure if we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstrn_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *px = result;
	  *py = const0_rtx;
	  *pmode = result_mode;
	  return;
	}

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);

      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }
  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  libfunc = optab_libfunc (cmp_optab, mode);
  if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					targetm.libgcc_cmp_return_mode (),
					2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 ...  */
      *px = result;
      *py = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED)
	{
	  if (*punsignedp)
	    *px = plus_constant (result, 1);
	  else
	    *py = const0_rtx;
	}
      return;
    }

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
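/* Illustrative example (added, not in the original sources): a biased
   __cmpdi2-style routine returns 0, 1 or 2 for "less than", "equal" and
   "greater than", so its result is compared against 1 with the original
   signed condition.  An unbiased routine returns -1, 0 or 1 and is compared
   against 0 instead; for unsigned conditions the unbiased result is first
   biased by adding 1 so that the comparison against 1 can still express
   LTU/GEU.  */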
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    {
      if (reload_completed)
	return NULL_RTX;
      x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
    }

  return x;
}
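/* Added note: prepare_operand may widen X (e.g. from HImode to SImode when
   only an SImode compare pattern exists) and then copy it into a fresh
   pseudo if the insn's operand predicate still rejects it; once reload has
   completed no new pseudos may be created, hence the bail-out above.  */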
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
	{
	  icode = optab_handler (cbranch_optab, wider_mode)->insn_code;

	  if (icode != CODE_FOR_nothing
	      && insn_data[icode].operand[0].predicate (test, wider_mode))
	    {
	      x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	      y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	      emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	      return;
	    }
	}

      /* Handle some compares against zero.  */
      icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      if (!CLASS_HAS_WIDER_MODES_P (class))
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
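/* Summary (added comment): the fallback order above is (1) a combined
   compare-and-branch via cbranch_optab, (2) a test against zero via
   tst_optab followed by a bcc-style branch, and (3) a plain compare via
   cmp_optab followed by a bcc-style branch, each retried in successively
   wider modes of the same class.  */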
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, callers are required to pass
	 operands in an order conforming to canonical RTL.  We relax this
	 for commutative comparisons so callers using EQ don't need to do
	 swapping by hand.  */
      gcc_assert (label || (comparison == swap_condition (comparison)));

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}

/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
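/* Typical use (illustrative only): branching to LAB when OP0 < OP1 as signed
   SImode values would be written as

     emit_cmp_and_jump_insns (op0, op1, LT, NULL_RTX, SImode, 0, lab);

   SIZE is only needed for BLKmode operands.  */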
4483 /* Emit a library call comparison between floating point X and Y.
4484 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4487 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
4488 enum machine_mode
*pmode
, int *punsignedp
)
4490 enum rtx_code comparison
= *pcomparison
;
4491 enum rtx_code swapped
= swap_condition (comparison
);
4492 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4495 enum machine_mode orig_mode
= GET_MODE (x
);
4496 enum machine_mode mode
, cmp_mode
;
4497 rtx value
, target
, insns
, equiv
;
4499 bool reversed_p
= false;
4500 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4502 for (mode
= orig_mode
;
4504 mode
= GET_MODE_WIDER_MODE (mode
))
4506 if ((libfunc
= optab_libfunc (code_to_optab
[comparison
], mode
)))
4509 if ((libfunc
= optab_libfunc (code_to_optab
[swapped
] , mode
)))
4512 tmp
= x
; x
= y
; y
= tmp
;
4513 comparison
= swapped
;
4517 if ((libfunc
= optab_libfunc (code_to_optab
[reversed
], mode
))
4518 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
4520 comparison
= reversed
;
4526 gcc_assert (mode
!= VOIDmode
);
4528 if (mode
!= orig_mode
)
4530 x
= convert_to_mode (mode
, x
, 0);
4531 y
= convert_to_mode (mode
, y
, 0);
4534 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4535 the RTL. The allows the RTL optimizers to delete the libcall if the
4536 condition can be determined at compile-time. */
4537 if (comparison
== UNORDERED
)
4539 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4540 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4541 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4542 temp
, const_true_rtx
, equiv
);
4546 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4547 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4549 rtx true_rtx
, false_rtx
;
4554 true_rtx
= const0_rtx
;
4555 false_rtx
= const_true_rtx
;
4559 true_rtx
= const_true_rtx
;
4560 false_rtx
= const0_rtx
;
4564 true_rtx
= const1_rtx
;
4565 false_rtx
= const0_rtx
;
4569 true_rtx
= const0_rtx
;
4570 false_rtx
= constm1_rtx
;
4574 true_rtx
= constm1_rtx
;
4575 false_rtx
= const0_rtx
;
4579 true_rtx
= const0_rtx
;
4580 false_rtx
= const1_rtx
;
4586 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4587 equiv
, true_rtx
, false_rtx
);
4592 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4593 cmp_mode
, 2, x
, mode
, y
, mode
);
4594 insns
= get_insns ();
4597 target
= gen_reg_rtx (cmp_mode
);
4598 emit_libcall_block (insns
, target
, value
, equiv
);
4600 if (comparison
== UNORDERED
4601 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4602 comparison
= reversed_p
? EQ
: NE
;
4607 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
4624 #ifdef HAVE_conditional_move
4626 /* Emit a conditional move instruction if the machine supports one for that
4627 condition and machine mode.
4629 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4630 the mode to use should they be constants. If it is VOIDmode, they cannot
4633 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4634 should be stored there. MODE is the mode to use should they be constants.
4635 If it is VOIDmode, they cannot both be constants.
4637 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4638 is not supported. */
4641 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4642 enum machine_mode cmode
, rtx op2
, rtx op3
,
4643 enum machine_mode mode
, int unsignedp
)
4645 rtx tem
, subtarget
, comparison
, insn
;
4646 enum insn_code icode
;
4647 enum rtx_code reversed
;
4649 /* If one operand is constant, make it the second one. Only do this
4650 if the other operand is not constant as well. */
4652 if (swap_commutative_operands_p (op0
, op1
))
4657 code
= swap_condition (code
);
4660 /* get_condition will prefer to generate LT and GT even if the old
4661 comparison was against zero, so undo that canonicalization here since
4662 comparisons against zero are cheaper. */
4663 if (code
== LT
&& op1
== const1_rtx
)
4664 code
= LE
, op1
= const0_rtx
;
4665 else if (code
== GT
&& op1
== constm1_rtx
)
4666 code
= GE
, op1
= const0_rtx
;
4668 if (cmode
== VOIDmode
)
4669 cmode
= GET_MODE (op0
);
4671 if (swap_commutative_operands_p (op2
, op3
)
4672 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4681 if (mode
== VOIDmode
)
4682 mode
= GET_MODE (op2
);
4684 icode
= movcc_gen_code
[mode
];
4686 if (icode
== CODE_FOR_nothing
)
4690 target
= gen_reg_rtx (mode
);
4694 /* If the insn doesn't accept these operands, put them in pseudos. */
4696 if (!insn_data
[icode
].operand
[0].predicate
4697 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4698 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4700 if (!insn_data
[icode
].operand
[2].predicate
4701 (op2
, insn_data
[icode
].operand
[2].mode
))
4702 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4704 if (!insn_data
[icode
].operand
[3].predicate
4705 (op3
, insn_data
[icode
].operand
[3].mode
))
4706 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4708 /* Everything should now be in the suitable form, so emit the compare insn
4709 and then the conditional move. */
4712 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4714 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4715 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4716 return NULL and let the caller figure out how best to deal with this
4718 if (GET_CODE (comparison
) != code
)
4721 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4723 /* If that failed, then give up. */
4729 if (subtarget
!= target
)
4730 convert_move (target
, subtarget
, 0);
4735 /* Return nonzero if a conditional move of mode MODE is supported.
4737 This function is for combine so it can tell whether an insn that looks
4738 like a conditional move is actually supported by the hardware. If we
4739 guess wrong we lose a bit on optimization, but that's it. */
4740 /* ??? sparc64 supports conditionally moving integers values based on fp
4741 comparisons, and vice versa. How do we handle them? */
4744 can_conditionally_move_p (enum machine_mode mode
)
4746 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4752 #endif /* HAVE_conditional_move */
4754 /* Emit a conditional addition instruction if the machine supports one for that
4755 condition and machine mode.
4757 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4758 the mode to use should they be constants. If it is VOIDmode, they cannot
4761 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4762 should be stored there. MODE is the mode to use should they be constants.
4763 If it is VOIDmode, they cannot both be constants.
4765 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4766 is not supported. */
4769 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4770 enum machine_mode cmode
, rtx op2
, rtx op3
,
4771 enum machine_mode mode
, int unsignedp
)
4773 rtx tem
, subtarget
, comparison
, insn
;
4774 enum insn_code icode
;
4775 enum rtx_code reversed
;
4777 /* If one operand is constant, make it the second one. Only do this
4778 if the other operand is not constant as well. */
4780 if (swap_commutative_operands_p (op0
, op1
))
4785 code
= swap_condition (code
);
4788 /* get_condition will prefer to generate LT and GT even if the old
4789 comparison was against zero, so undo that canonicalization here since
4790 comparisons against zero are cheaper. */
4791 if (code
== LT
&& op1
== const1_rtx
)
4792 code
= LE
, op1
= const0_rtx
;
4793 else if (code
== GT
&& op1
== constm1_rtx
)
4794 code
= GE
, op1
= const0_rtx
;
4796 if (cmode
== VOIDmode
)
4797 cmode
= GET_MODE (op0
);
4799 if (swap_commutative_operands_p (op2
, op3
)
4800 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4809 if (mode
== VOIDmode
)
4810 mode
= GET_MODE (op2
);
4812 icode
= optab_handler (addcc_optab
, mode
)->insn_code
;
4814 if (icode
== CODE_FOR_nothing
)
4818 target
= gen_reg_rtx (mode
);
4820 /* If the insn doesn't accept these operands, put them in pseudos. */
4822 if (!insn_data
[icode
].operand
[0].predicate
4823 (target
, insn_data
[icode
].operand
[0].mode
))
4824 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4828 if (!insn_data
[icode
].operand
[2].predicate
4829 (op2
, insn_data
[icode
].operand
[2].mode
))
4830 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4832 if (!insn_data
[icode
].operand
[3].predicate
4833 (op3
, insn_data
[icode
].operand
[3].mode
))
4834 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4836 /* Everything should now be in the suitable form, so emit the compare insn
4837 and then the conditional move. */
4840 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4842 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4843 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4844 return NULL and let the caller figure out how best to deal with this
4846 if (GET_CODE (comparison
) != code
)
4849 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4851 /* If that failed, then give up. */
4857 if (subtarget
!= target
)
4858 convert_move (target
, subtarget
, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
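/* Example (illustrative): incrementing a pseudo REG by four in its own mode
   can be done with

     emit_insn (gen_add2_insn (reg, GEN_INT (4)));

   provided the add_optab operand predicates accept the constant, which the
   gcc_asserts above require of the caller.  */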
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
	   (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
	(x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
4927 /* Generate and return an insn body to subtract Y from X. */
4930 gen_sub2_insn (rtx x
, rtx y
)
4932 int icode
= (int) optab_handler (sub_optab
, GET_MODE (x
))->insn_code
;
4934 gcc_assert (insn_data
[icode
].operand
[0].predicate
4935 (x
, insn_data
[icode
].operand
[0].mode
));
4936 gcc_assert (insn_data
[icode
].operand
[1].predicate
4937 (x
, insn_data
[icode
].operand
[1].mode
));
4938 gcc_assert (insn_data
[icode
].operand
[2].predicate
4939 (y
, insn_data
[icode
].operand
[2].mode
));
4941 return GEN_FCN (icode
) (x
, x
, y
);
4944 /* Generate and return an insn body to subtract r1 and c,
4945 storing the result in r0. */
4948 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4950 int icode
= (int) optab_handler (sub_optab
, GET_MODE (r0
))->insn_code
;
4952 if (icode
== CODE_FOR_nothing
4953 || !(insn_data
[icode
].operand
[0].predicate
4954 (r0
, insn_data
[icode
].operand
[0].mode
))
4955 || !(insn_data
[icode
].operand
[1].predicate
4956 (r1
, insn_data
[icode
].operand
[1].mode
))
4957 || !(insn_data
[icode
].operand
[2].predicate
4958 (c
, insn_data
[icode
].operand
[2].mode
)))
4961 return GEN_FCN (icode
) (r0
, r1
, c
);
4965 have_sub2_insn (rtx x
, rtx y
)
4969 gcc_assert (GET_MODE (x
) != VOIDmode
);
4971 icode
= (int) optab_handler (sub_optab
, GET_MODE (x
))->insn_code
;
4973 if (icode
== CODE_FOR_nothing
)
4976 if (!(insn_data
[icode
].operand
[0].predicate
4977 (x
, insn_data
[icode
].operand
[0].mode
))
4978 || !(insn_data
[icode
].operand
[1].predicate
4979 (x
, insn_data
[icode
].operand
[1].mode
))
4980 || !(insn_data
[icode
].operand
[2].predicate
4981 (y
, insn_data
[icode
].operand
[2].mode
)))
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
}
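/* Added note: zext_optab/sext_optab are indexed by (to_mode, from_mode), so
   e.g. can_extend_p (DImode, SImode, 1) reports whether a zero_extendsidi2
   pattern exists.  The HAVE_ptr_extend case serves targets with a dedicated
   pointer-extension instruction (historically ia64).  */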
5020 /* Generate the body of an insn to extend Y (with mode MFROM)
5021 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5024 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
5025 enum machine_mode mfrom
, int unsignedp
)
5027 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
5028 return GEN_FCN (icode
) (x
, y
);
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
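/* Added note: the second attempt above lets a fix pattern that does not
   itself truncate still be used, as long as the target also provides an
   ftrunc pattern for FLTMODE; *TRUNCP_PTR then tells expand_fix to emit the
   explicit FTRUNC first.  */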
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
}
5081 /* Generate code to convert FROM to floating point
5082 and store in TO. FROM must be fixed point and not VOIDmode.
5083 UNSIGNEDP nonzero means regard FROM as unsigned.
5084 Normally this is done by correcting the final value
5085 if it is negative. */
5088 expand_float (rtx to
, rtx from
, int unsignedp
)
5090 enum insn_code icode
;
5092 enum machine_mode fmode
, imode
;
5093 bool can_do_signed
= false;
5095 /* Crash now, because we won't be able to decide which mode to use. */
5096 gcc_assert (GET_MODE (from
) != VOIDmode
);
5098 /* Look for an insn to do the conversion. Do it in the specified
5099 modes if possible; otherwise convert either input, output or both to
5100 wider mode. If the integer mode is wider than the mode of FROM,
5101 we can do the conversion signed even if the input is unsigned. */
5103 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5104 fmode
= GET_MODE_WIDER_MODE (fmode
))
5105 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
5106 imode
= GET_MODE_WIDER_MODE (imode
))
5108 int doing_unsigned
= unsignedp
;
5110 if (fmode
!= GET_MODE (to
)
5111 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
5114 icode
= can_float_p (fmode
, imode
, unsignedp
);
5115 if (icode
== CODE_FOR_nothing
&& unsignedp
)
5117 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
5118 if (scode
!= CODE_FOR_nothing
)
5119 can_do_signed
= true;
5120 if (imode
!= GET_MODE (from
))
5121 icode
= scode
, doing_unsigned
= 0;
5124 if (icode
!= CODE_FOR_nothing
)
5126 if (imode
!= GET_MODE (from
))
5127 from
= convert_to_mode (imode
, from
, unsignedp
);
5129 if (fmode
!= GET_MODE (to
))
5130 target
= gen_reg_rtx (fmode
);
5132 emit_unop_insn (icode
, target
, from
,
5133 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
5136 convert_move (to
, target
, 0);
5141 /* Unsigned integer, and no way to convert directly. Convert as signed,
5142 then unconditionally adjust the result. For decimal float values we
5143 do this only if we have already determined that a signed conversion
5144 provides sufficient accuracy. */
5145 if (unsignedp
&& (can_do_signed
|| !DECIMAL_FLOAT_MODE_P (GET_MODE (to
))))
5147 rtx label
= gen_label_rtx ();
5149 REAL_VALUE_TYPE offset
;
5151 /* Look for a usable floating mode FMODE wider than the source and at
5152 least as wide as the target. Using FMODE will avoid rounding woes
5153 with unsigned values greater than the signed maximum value. */
5155 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5156 fmode
= GET_MODE_WIDER_MODE (fmode
))
5157 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
5158 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
5161 if (fmode
== VOIDmode
)
5163 /* There is no such mode. Pretend the target is wide enough. */
5164 fmode
= GET_MODE (to
);
5166 /* Avoid double-rounding when TO is narrower than FROM. */
5167 if ((significand_size (fmode
) + 1)
5168 < GET_MODE_BITSIZE (GET_MODE (from
)))
5171 rtx neglabel
= gen_label_rtx ();
5173 /* Don't use TARGET if it isn't a register, is a hard register,
5174 or is the wrong mode. */
5176 || REGNO (target
) < FIRST_PSEUDO_REGISTER
5177 || GET_MODE (target
) != fmode
)
5178 target
= gen_reg_rtx (fmode
);
5180 imode
= GET_MODE (from
);
5181 do_pending_stack_adjust ();
5183 /* Test whether the sign bit is set. */
5184 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
5187 /* The sign bit is not set. Convert as signed. */
5188 expand_float (target
, from
, 0);
5189 emit_jump_insn (gen_jump (label
));
5192 /* The sign bit is set.
5193 Convert to a usable (positive signed) value by shifting right
5194 one bit, while remembering if a nonzero bit was shifted
5195 out; i.e., compute (from & 1) | (from >> 1). */
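/* Worked example (added comment): for a 32-bit FROM of 0xFFFFFFFF the value
   becomes (0xFFFFFFFF >> 1) | 1 = 0x7FFFFFFF, which is converted as a signed
   number and then doubled by the addition below; the result matches the
   original unsigned value to within the rounding of the target float
   format.  */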
5197 emit_label (neglabel
);
5198 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
5199 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
5200 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
5202 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5204 expand_float (target
, temp
, 0);
5206 /* Multiply by 2 to undo the shift above. */
5207 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5208 target
, 0, OPTAB_LIB_WIDEN
);
5210 emit_move_insn (target
, temp
);
5212 do_pending_stack_adjust ();
5218 /* If we are about to do some arithmetic to correct for an
5219 unsigned operand, do it in a pseudo-register. */
5221 if (GET_MODE (to
) != fmode
5222 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5223 target
= gen_reg_rtx (fmode
);
5225 /* Convert as signed integer to floating. */
5226 expand_float (target
, from
, 0);
5228 /* If FROM is negative (and therefore TO is negative),
5229 correct its value by 2**bitwidth. */
5231 do_pending_stack_adjust ();
5232 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
5236 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)), fmode
);
5237 temp
= expand_binop (fmode
, add_optab
, target
,
5238 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
5239 target
, 0, OPTAB_LIB_WIDEN
);
5241 emit_move_insn (target
, temp
);
5243 do_pending_stack_adjust ();
5248 /* No hardware instruction available; call a library routine. */
5253 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5255 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
5256 from
= convert_to_mode (SImode
, from
, unsignedp
);
5258 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5259 gcc_assert (libfunc
);
5263 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5264 GET_MODE (to
), 1, from
,
5266 insns
= get_insns ();
5269 emit_libcall_block (insns
, target
, value
,
5270 gen_rtx_FLOAT (GET_MODE (to
), from
));
5275 /* Copy result to requested destination
5276 if we have been computing in a temp location. */
5280 if (GET_MODE (target
) == GET_MODE (to
))
5281 emit_move_insn (to
, target
);
5283 convert_move (to
, target
, 0);
5287 /* Generate code to convert FROM to fixed point and store in TO. FROM
5288 must be floating point. */
5291 expand_fix (rtx to
, rtx from
, int unsignedp
)
5293 enum insn_code icode
;
5295 enum machine_mode fmode
, imode
;
5298 /* We first try to find a pair of modes, one real and one integer, at
5299 least as wide as FROM and TO, respectively, in which we can open-code
5300 this conversion. If the integer mode is wider than the mode of TO,
5301 we can do the conversion either signed or unsigned. */
5303 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5304 fmode
= GET_MODE_WIDER_MODE (fmode
))
5305 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5306 imode
= GET_MODE_WIDER_MODE (imode
))
5308 int doing_unsigned
= unsignedp
;
5310 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5311 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5312 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5314 if (icode
!= CODE_FOR_nothing
)
5316 if (fmode
!= GET_MODE (from
))
5317 from
= convert_to_mode (fmode
, from
, 0);
5321 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5322 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5326 if (imode
!= GET_MODE (to
))
5327 target
= gen_reg_rtx (imode
);
5329 emit_unop_insn (icode
, target
, from
,
5330 doing_unsigned
? UNSIGNED_FIX
: FIX
);
5332 convert_move (to
, target
, unsignedp
);
5337 /* For an unsigned conversion, there is one more way to do it.
5338 If we have a signed conversion, we generate code that compares
5339 the real value to the largest representable positive number. If if
5340 is smaller, the conversion is done normally. Otherwise, subtract
5341 one plus the highest signed number, convert, and add it back.
5343 We only need to check all real modes, since we know we didn't find
5344 anything with a wider integer mode.
5346 This code used to extend FP value into mode wider than the destination.
5347 This is needed for decimal float modes which cannot accurately
5348 represent one plus the highest signed number of the same size, but
5349 not for binary modes. Consider, for instance conversion from SFmode
5352 The hot path through the code is dealing with inputs smaller than 2^63
5353 and doing just the conversion, so there is no bits to lose.
5355 In the other path we know the value is positive in the range 2^63..2^64-1
5356 inclusive. (as for other input overflow happens and result is undefined)
5357 So we know that the most important bit set in mantissa corresponds to
5358 2^63. The subtraction of 2^63 should not generate any rounding as it
5359 simply clears out that bit. The rest is trivial. */
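/* Worked example (added comment): converting the DFmode value 2^63 + 4096 to
   unsigned DImode takes the second path: subtracting 2^63 yields exactly
   4096.0 (only the top mantissa bit is cleared), the signed fix produces
   4096, and the XOR with the 2^63 bit below restores the final result
   0x8000000000001000.  */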
5361 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5362 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5363 fmode
= GET_MODE_WIDER_MODE (fmode
))
5364 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5365 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5366 || GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))))
5369 REAL_VALUE_TYPE offset
;
5370 rtx limit
, lab1
, lab2
, insn
;
5372 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
5373 real_2expN (&offset
, bitsize
- 1, fmode
);
5374 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5375 lab1
= gen_label_rtx ();
5376 lab2
= gen_label_rtx ();
5378 if (fmode
!= GET_MODE (from
))
5379 from
= convert_to_mode (fmode
, from
, 0);
5381 /* See if we need to do the subtraction. */
5382 do_pending_stack_adjust ();
5383 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5386 /* If not, do the signed "fix" and branch around fixup code. */
5387 expand_fix (to
, from
, 0);
5388 emit_jump_insn (gen_jump (lab2
));
5391 /* Otherwise, subtract 2**(N-1), convert to signed number,
5392 then add 2**(N-1). Do the addition using XOR since this
5393 will often generate better code. */
5395 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5396 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5397 expand_fix (to
, target
, 0);
5398 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5400 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5402 to
, 1, OPTAB_LIB_WIDEN
);
5405 emit_move_insn (to
, target
);
5409 if (optab_handler (mov_optab
, GET_MODE (to
))->insn_code
5410 != CODE_FOR_nothing
)
5412 /* Make a place for a REG_NOTE and add it. */
5413 insn
= emit_move_insn (to
, to
);
5414 set_unique_reg_note (insn
,
5416 gen_rtx_fmt_e (UNSIGNED_FIX
,
5424 /* We can't do it with an insn, so use a library call. But first ensure
5425 that the mode of TO is at least as wide as SImode, since those are the
5426 only library calls we know about. */
5428 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5430 target
= gen_reg_rtx (SImode
);
5432 expand_fix (target
, from
, unsignedp
);
5440 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5441 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5442 gcc_assert (libfunc
);
5446 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5447 GET_MODE (to
), 1, from
,
5449 insns
= get_insns ();
5452 emit_libcall_block (insns
, target
, value
,
5453 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5454 GET_MODE (to
), from
));
5459 if (GET_MODE (to
) == GET_MODE (target
))
5460 emit_move_insn (to
, target
);
5462 convert_move (to
, target
, 0);
5466 /* Generate code to convert FROM or TO a fixed-point.
5467 If UINTP is true, either TO or FROM is an unsigned integer.
5468 If SATP is true, we need to saturate the result. */
5471 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5473 enum machine_mode to_mode
= GET_MODE (to
);
5474 enum machine_mode from_mode
= GET_MODE (from
);
5476 enum rtx_code this_code
;
5477 enum insn_code code
;
5481 if (to_mode
== from_mode
)
5483 emit_move_insn (to
, from
);
5489 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5490 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5494 tab
= satp
? satfract_optab
: fract_optab
;
5495 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5497 code
= tab
->handlers
[to_mode
][from_mode
].insn_code
;
5498 if (code
!= CODE_FOR_nothing
)
5500 emit_unop_insn (code
, to
, from
, this_code
);
5504 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5505 gcc_assert (libfunc
);
5508 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5509 1, from
, from_mode
);
5510 insns
= get_insns ();
5513 emit_libcall_block (insns
, to
, value
,
5514 gen_rtx_fmt_e (tab
->code
, to_mode
, from
));
5517 /* Generate code to convert FROM to fixed point and store in TO. FROM
5518 must be floating point, TO must be signed. Use the conversion optab
5519 TAB to do the conversion. */
5522 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5524 enum insn_code icode
;
5526 enum machine_mode fmode
, imode
;
5528 /* We first try to find a pair of modes, one real and one integer, at
5529 least as wide as FROM and TO, respectively, in which we can open-code
5530 this conversion. If the integer mode is wider than the mode of TO,
5531 we can do the conversion either signed or unsigned. */
5533 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5534 fmode
= GET_MODE_WIDER_MODE (fmode
))
5535 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5536 imode
= GET_MODE_WIDER_MODE (imode
))
5538 icode
= convert_optab_handler (tab
, imode
, fmode
)->insn_code
;
5539 if (icode
!= CODE_FOR_nothing
)
5541 if (fmode
!= GET_MODE (from
))
5542 from
= convert_to_mode (fmode
, from
, 0);
5544 if (imode
!= GET_MODE (to
))
5545 target
= gen_reg_rtx (imode
);
5547 emit_unop_insn (icode
, target
, from
, UNKNOWN
);
5549 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (optab_handler (code_to_optab[(int) code], mode)->insn_code
	      != CODE_FOR_nothing));
}

/* Set all insn_code fields to CODE_FOR_nothing.  */

static void
init_insn_codes (void)
{
  unsigned int i;

  for (i = 0; i < (unsigned int) OTI_MAX; i++)
    {
      unsigned int j;
      optab op;

      op = &optab_table[i];
      for (j = 0; j < NUM_MACHINE_MODES; j++)
	optab_handler (op, j)->insn_code = CODE_FOR_nothing;
    }
  for (i = 0; i < (unsigned int) COI_MAX; i++)
    {
      unsigned int j, k;
      convert_optab op;

      op = &convert_optab_table[i];
      for (j = 0; j < NUM_MACHINE_MODES; j++)
	for (k = 0; k < NUM_MACHINE_MODES; k++)
	  convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
    }
}
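/* Added note: with a new enough bootstrap compiler (GCC_VERSION >= 4000) the
   insn_code fields are already statically initialized to CODE_FOR_nothing,
   so init_optabs below only needs this routine when the tables have to be
   reinitialized; older bootstrap compilers always go through these loops.  */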
5595 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5597 init_optab (optab op
, enum rtx_code code
)
5600 code_to_optab
[(int) code
] = op
;
5603 /* Same, but fill in its code as CODE, and do _not_ write it into
5604 the code_to_optab table. */
5606 init_optabv (optab op
, enum rtx_code code
)
5611 /* Conversion optabs never go in the code_to_optab table. */
5613 init_convert_optab (convert_optab op
, enum rtx_code code
)
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;

  set_optab_libfunc (optable, mode,
		     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
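/* Example (added comment): gen_libfunc (add_optab, "add", '3', SFmode)
   yields the name "__addsf3", the libgcc routine used when no addsf3 insn
   pattern is available.  */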
5656 /* Like gen_libfunc, but verify that integer operation is involved. */
5659 gen_int_libfunc (optab optable
, const char *opname
, char suffix
,
5660 enum machine_mode mode
)
5662 int maxsize
= 2 * BITS_PER_WORD
;
5664 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5666 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5667 maxsize
= LONG_LONG_TYPE_SIZE
;
5668 if (GET_MODE_CLASS (mode
) != MODE_INT
5669 || mode
< word_mode
|| GET_MODE_BITSIZE (mode
) > maxsize
)
5671 gen_libfunc (optable
, opname
, suffix
, mode
);
5674 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5677 gen_fp_libfunc (optab optable
, const char *opname
, char suffix
,
5678 enum machine_mode mode
)
5682 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5683 gen_libfunc (optable
, opname
, suffix
, mode
);
5684 if (DECIMAL_FLOAT_MODE_P (mode
))
5686 dec_opname
= alloca (sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5687 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5688 depending on the low level floating format used. */
5689 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5690 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5691 gen_libfunc (optable
, dec_opname
, suffix
, mode
);
5695 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5698 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5699 enum machine_mode mode
)
5701 if (!ALL_FIXED_POINT_MODE_P (mode
))
5703 gen_libfunc (optable
, opname
, suffix
, mode
);
5706 /* Like gen_libfunc, but verify that signed fixed-point operation is
5710 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5711 enum machine_mode mode
)
5713 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5715 gen_libfunc (optable
, opname
, suffix
, mode
);
5718 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5722 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5723 enum machine_mode mode
)
5725 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5727 gen_libfunc (optable
, opname
, suffix
, mode
);
5730 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5733 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5734 enum machine_mode mode
)
5736 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5737 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5738 if (INTEGRAL_MODE_P (mode
))
5739 gen_int_libfunc (optable
, name
, suffix
, mode
);
5742 /* Like gen_libfunc, but verify that FP or INT operation is involved
5743 and add 'v' suffix for integer operation. */
5746 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5747 enum machine_mode mode
)
5749 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5750 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5751 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5753 int len
= strlen (name
);
5754 char *v_name
= alloca (len
+ 2);
5755 strcpy (v_name
, name
);
5757 v_name
[len
+ 1] = 0;
5758 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
5762 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5766 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5767 enum machine_mode mode
)
5769 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5770 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5771 if (INTEGRAL_MODE_P (mode
))
5772 gen_int_libfunc (optable
, name
, suffix
, mode
);
5773 if (ALL_FIXED_POINT_MODE_P (mode
))
5774 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5777 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5781 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5782 enum machine_mode mode
)
5784 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5785 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5786 if (INTEGRAL_MODE_P (mode
))
5787 gen_int_libfunc (optable
, name
, suffix
, mode
);
5788 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5789 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5792 /* Like gen_libfunc, but verify that INT or FIXED operation is
5796 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5797 enum machine_mode mode
)
5799 if (INTEGRAL_MODE_P (mode
))
5800 gen_int_libfunc (optable
, name
, suffix
, mode
);
5801 if (ALL_FIXED_POINT_MODE_P (mode
))
5802 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5805 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5809 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5810 enum machine_mode mode
)
5812 if (INTEGRAL_MODE_P (mode
))
5813 gen_int_libfunc (optable
, name
, suffix
, mode
);
5814 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5815 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5818 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5822 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5823 enum machine_mode mode
)
5825 if (INTEGRAL_MODE_P (mode
))
5826 gen_int_libfunc (optable
, name
, suffix
, mode
);
5827 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5828 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5831 /* Initialize the libfunc fields of an entire group of entries of an
5832 inter-mode-class conversion optab. The string formation rules are
5833 similar to the ones for init_libfuncs, above, but instead of having
5834 a mode name and an operand count these functions have two mode names
5835 and no operand count. */
5838 gen_interclass_conv_libfunc (convert_optab tab
,
5840 enum machine_mode tmode
,
5841 enum machine_mode fmode
)
5843 size_t opname_len
= strlen (opname
);
5844 size_t mname_len
= 0;
5846 const char *fname
, *tname
;
5848 char *libfunc_name
, *suffix
;
5849 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5852 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5853 depends on which underlying decimal floating point format is used. */
5854 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5856 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5858 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5859 nondec_name
[0] = '_';
5860 nondec_name
[1] = '_';
5861 memcpy (&nondec_name
[2], opname
, opname_len
);
5862 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5864 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5867 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5868 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5869 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5871 fname
= GET_MODE_NAME (fmode
);
5872 tname
= GET_MODE_NAME (tmode
);
5874 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5876 libfunc_name
= dec_name
;
5877 suffix
= dec_suffix
;
5881 libfunc_name
= nondec_name
;
5882 suffix
= nondec_suffix
;
5886 for (q
= fname
; *q
; p
++, q
++)
5888 for (q
= tname
; *q
; p
++, q
++)
5893 set_conv_libfunc (tab
, tmode
, fmode
,
5894 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5897 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5898 int->fp conversion. */
5901 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5903 enum machine_mode tmode
,
5904 enum machine_mode fmode
)
5906 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5908 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5910 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5913 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5917 gen_ufloat_conv_libfunc (convert_optab tab
,
5918 const char *opname ATTRIBUTE_UNUSED
,
5919 enum machine_mode tmode
,
5920 enum machine_mode fmode
)
5922 if (DECIMAL_FLOAT_MODE_P (tmode
))
5923 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5925 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
5928 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5929 fp->int conversion. */
5932 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5934 enum machine_mode tmode
,
5935 enum machine_mode fmode
)
5937 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5939 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5941 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5944 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5945 fp->int conversion with no decimal floating point involved. */
5948 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5950 enum machine_mode tmode
,
5951 enum machine_mode fmode
)
5953 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5955 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5957 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5960 /* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
5961 The string formation rules are
5962 similar to the ones for init_libfunc, above. */
5965 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5966 enum machine_mode tmode
, enum machine_mode fmode
)
5968 size_t opname_len
= strlen (opname
);
5969 size_t mname_len
= 0;
5971 const char *fname
, *tname
;
5973 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5974 char *libfunc_name
, *suffix
;
5977 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5978 depends on which underlying decimal floating point format is used. */
5979 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5981 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5983 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5984 nondec_name
[0] = '_';
5985 nondec_name
[1] = '_';
5986 memcpy (&nondec_name
[2], opname
, opname_len
);
5987 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5989 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5992 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5993 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5994 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5996 fname
= GET_MODE_NAME (fmode
);
5997 tname
= GET_MODE_NAME (tmode
);
5999 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
6001 libfunc_name
= dec_name
;
6002 suffix
= dec_suffix
;
6006 libfunc_name
= nondec_name
;
6007 suffix
= nondec_suffix
;
6011 for (q
= fname
; *q
; p
++, q
++)
6013 for (q
= tname
; *q
; p
++, q
++)
6019 set_conv_libfunc (tab
, tmode
, fmode
,
6020 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
6023 /* Pick proper libcall for trunc_optab. We need to chose if we do
6024 truncation or extension and interclass or intraclass. */
6027 gen_trunc_conv_libfunc (convert_optab tab
,
6029 enum machine_mode tmode
,
6030 enum machine_mode fmode
)
6032 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6034 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6039 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6040 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6041 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6043 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
6046 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6047 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6048 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6049 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6052 /* Pick proper libcall for extend_optab. We need to chose if we do
6053 truncation or extension and interclass or intraclass. */
6056 gen_extend_conv_libfunc (convert_optab tab
,
6057 const char *opname ATTRIBUTE_UNUSED
,
6058 enum machine_mode tmode
,
6059 enum machine_mode fmode
)
6061 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6063 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6068 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6069 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6070 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6072 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
6075 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6076 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6077 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6078 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6081 /* Pick proper libcall for fract_optab. We need to chose if we do
6082 interclass or intraclass. */
6085 gen_fract_conv_libfunc (convert_optab tab
,
6087 enum machine_mode tmode
,
6088 enum machine_mode fmode
)
6092 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
6095 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6096 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6098 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6101 /* Pick proper libcall for fractuns_optab. */
6104 gen_fractuns_conv_libfunc (convert_optab tab
,
6106 enum machine_mode tmode
,
6107 enum machine_mode fmode
)
6111 /* One mode must be a fixed-point mode, and the other must be an integer
6113 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
6114 || (ALL_FIXED_POINT_MODE_P (fmode
)
6115 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
6118 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6121 /* Pick proper libcall for satfract_optab. We need to chose if we do
6122 interclass or intraclass. */
6125 gen_satfract_conv_libfunc (convert_optab tab
,
6127 enum machine_mode tmode
,
6128 enum machine_mode fmode
)
6132 /* TMODE must be a fixed-point mode. */
6133 if (!ALL_FIXED_POINT_MODE_P (tmode
))
6136 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6137 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6139 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6142 /* Pick proper libcall for satfractuns_optab. */
6145 gen_satfractuns_conv_libfunc (convert_optab tab
,
6147 enum machine_mode tmode
,
6148 enum machine_mode fmode
)
6152 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6153 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
6156 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */

  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optable - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optable - &optab_table[0]);
  (*slot)->mode1 = mode;
  (*slot)->mode2 = VOIDmode;
  (*slot)->libfunc = val;
}
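/* For example, a target's init_libfuncs hook might do

     set_optab_libfunc (smod_optab, SImode, "__custom_modsi3");
     set_optab_libfunc (ffs_optab, DImode, 0);

   to rename one entry and remove another; "__custom_modsi3" is only an
   illustrative name, not a routine any real target provides.  */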
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optable - &convert_optab_table[0]);
  e.mode1 = tmode;
  e.mode2 = fmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
  (*slot)->mode1 = tmode;
  (*slot)->mode2 = fmode;
  (*slot)->libfunc = val;
}
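/* Likewise for conversions: for instance

     set_conv_libfunc (sfloat_optab, DFmode, SImode, "__floatsidf");

   registers the SImode-to-DFmode signed float conversion; __floatsidf is
   the conventional libgcc name for that routine.  */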
/* Call this to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;
  enum machine_mode int_mode;

  libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }

#if GCC_VERSION >= 4000
  /* We statically initialize the insn_codes with CODE_FOR_nothing.  */
#else
  init_insn_codes ();
#endif
  init_optab (add_optab, PLUS);
  init_optabv (addv_optab, PLUS);
  init_optab (sub_optab, MINUS);
  init_optabv (subv_optab, MINUS);
  init_optab (ssadd_optab, SS_PLUS);
  init_optab (usadd_optab, US_PLUS);
  init_optab (sssub_optab, SS_MINUS);
  init_optab (ussub_optab, US_MINUS);
  init_optab (smul_optab, MULT);
  init_optab (ssmul_optab, SS_MULT);
  init_optab (usmul_optab, US_MULT);
  init_optabv (smulv_optab, MULT);
  init_optab (smul_highpart_optab, UNKNOWN);
  init_optab (umul_highpart_optab, UNKNOWN);
  init_optab (smul_widen_optab, UNKNOWN);
  init_optab (umul_widen_optab, UNKNOWN);
  init_optab (usmul_widen_optab, UNKNOWN);
  init_optab (smadd_widen_optab, UNKNOWN);
  init_optab (umadd_widen_optab, UNKNOWN);
  init_optab (ssmadd_widen_optab, UNKNOWN);
  init_optab (usmadd_widen_optab, UNKNOWN);
  init_optab (smsub_widen_optab, UNKNOWN);
  init_optab (umsub_widen_optab, UNKNOWN);
  init_optab (ssmsub_widen_optab, UNKNOWN);
  init_optab (usmsub_widen_optab, UNKNOWN);
  init_optab (sdiv_optab, DIV);
  init_optab (ssdiv_optab, SS_DIV);
  init_optab (usdiv_optab, US_DIV);
  init_optabv (sdivv_optab, DIV);
  init_optab (sdivmod_optab, UNKNOWN);
  init_optab (udiv_optab, UDIV);
  init_optab (udivmod_optab, UNKNOWN);
  init_optab (smod_optab, MOD);
  init_optab (umod_optab, UMOD);
  init_optab (fmod_optab, UNKNOWN);
  init_optab (remainder_optab, UNKNOWN);
  init_optab (ftrunc_optab, UNKNOWN);
  init_optab (and_optab, AND);
  init_optab (ior_optab, IOR);
  init_optab (xor_optab, XOR);
  init_optab (ashl_optab, ASHIFT);
  init_optab (ssashl_optab, SS_ASHIFT);
  init_optab (usashl_optab, US_ASHIFT);
  init_optab (ashr_optab, ASHIFTRT);
  init_optab (lshr_optab, LSHIFTRT);
  init_optab (rotl_optab, ROTATE);
  init_optab (rotr_optab, ROTATERT);
  init_optab (smin_optab, SMIN);
  init_optab (smax_optab, SMAX);
  init_optab (umin_optab, UMIN);
  init_optab (umax_optab, UMAX);
  init_optab (pow_optab, UNKNOWN);
  init_optab (atan2_optab, UNKNOWN);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  init_optab (mov_optab, SET);
  init_optab (movstrict_optab, STRICT_LOW_PART);
  init_optab (cmp_optab, COMPARE);
  init_optab (storent_optab, UNKNOWN);

  init_optab (ucmp_optab, UNKNOWN);
  init_optab (tst_optab, UNKNOWN);

  init_optab (eq_optab, EQ);
  init_optab (ne_optab, NE);
  init_optab (gt_optab, GT);
  init_optab (ge_optab, GE);
  init_optab (lt_optab, LT);
  init_optab (le_optab, LE);
  init_optab (unord_optab, UNORDERED);

  init_optab (neg_optab, NEG);
  init_optab (ssneg_optab, SS_NEG);
  init_optab (usneg_optab, US_NEG);
  init_optabv (negv_optab, NEG);
  init_optab (abs_optab, ABS);
  init_optabv (absv_optab, ABS);
  init_optab (addcc_optab, UNKNOWN);
  init_optab (one_cmpl_optab, NOT);
  init_optab (bswap_optab, BSWAP);
  init_optab (ffs_optab, FFS);
  init_optab (clz_optab, CLZ);
  init_optab (ctz_optab, CTZ);
  init_optab (popcount_optab, POPCOUNT);
  init_optab (parity_optab, PARITY);
  init_optab (sqrt_optab, SQRT);
  init_optab (floor_optab, UNKNOWN);
  init_optab (ceil_optab, UNKNOWN);
  init_optab (round_optab, UNKNOWN);
  init_optab (btrunc_optab, UNKNOWN);
  init_optab (nearbyint_optab, UNKNOWN);
  init_optab (rint_optab, UNKNOWN);
  init_optab (sincos_optab, UNKNOWN);
  init_optab (sin_optab, UNKNOWN);
  init_optab (asin_optab, UNKNOWN);
  init_optab (cos_optab, UNKNOWN);
  init_optab (acos_optab, UNKNOWN);
  init_optab (exp_optab, UNKNOWN);
  init_optab (exp10_optab, UNKNOWN);
  init_optab (exp2_optab, UNKNOWN);
  init_optab (expm1_optab, UNKNOWN);
  init_optab (ldexp_optab, UNKNOWN);
  init_optab (scalb_optab, UNKNOWN);
  init_optab (logb_optab, UNKNOWN);
  init_optab (ilogb_optab, UNKNOWN);
  init_optab (log_optab, UNKNOWN);
  init_optab (log10_optab, UNKNOWN);
  init_optab (log2_optab, UNKNOWN);
  init_optab (log1p_optab, UNKNOWN);
  init_optab (tan_optab, UNKNOWN);
  init_optab (atan_optab, UNKNOWN);
  init_optab (copysign_optab, UNKNOWN);
  init_optab (signbit_optab, UNKNOWN);

  init_optab (isinf_optab, UNKNOWN);

  init_optab (strlen_optab, UNKNOWN);
  init_optab (cbranch_optab, UNKNOWN);
  init_optab (cmov_optab, UNKNOWN);
  init_optab (cstore_optab, UNKNOWN);
  init_optab (push_optab, UNKNOWN);

  init_optab (reduc_smax_optab, UNKNOWN);
  init_optab (reduc_umax_optab, UNKNOWN);
  init_optab (reduc_smin_optab, UNKNOWN);
  init_optab (reduc_umin_optab, UNKNOWN);
  init_optab (reduc_splus_optab, UNKNOWN);
  init_optab (reduc_uplus_optab, UNKNOWN);

  init_optab (ssum_widen_optab, UNKNOWN);
  init_optab (usum_widen_optab, UNKNOWN);
  init_optab (sdot_prod_optab, UNKNOWN);
  init_optab (udot_prod_optab, UNKNOWN);

  init_optab (vec_extract_optab, UNKNOWN);
  init_optab (vec_extract_even_optab, UNKNOWN);
  init_optab (vec_extract_odd_optab, UNKNOWN);
  init_optab (vec_interleave_high_optab, UNKNOWN);
  init_optab (vec_interleave_low_optab, UNKNOWN);
  init_optab (vec_set_optab, UNKNOWN);
  init_optab (vec_init_optab, UNKNOWN);
  init_optab (vec_shl_optab, UNKNOWN);
  init_optab (vec_shr_optab, UNKNOWN);
  init_optab (vec_realign_load_optab, UNKNOWN);
  init_optab (movmisalign_optab, UNKNOWN);
  init_optab (vec_widen_umult_hi_optab, UNKNOWN);
  init_optab (vec_widen_umult_lo_optab, UNKNOWN);
  init_optab (vec_widen_smult_hi_optab, UNKNOWN);
  init_optab (vec_widen_smult_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
  init_optab (vec_pack_trunc_optab, UNKNOWN);
  init_optab (vec_pack_usat_optab, UNKNOWN);
  init_optab (vec_pack_ssat_optab, UNKNOWN);
  init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
  init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);

  init_optab (powi_optab, UNKNOWN);
  init_convert_optab (sext_optab, SIGN_EXTEND);
  init_convert_optab (zext_optab, ZERO_EXTEND);
  init_convert_optab (trunc_optab, TRUNCATE);
  init_convert_optab (sfix_optab, FIX);
  init_convert_optab (ufix_optab, UNSIGNED_FIX);
  init_convert_optab (sfixtrunc_optab, UNKNOWN);
  init_convert_optab (ufixtrunc_optab, UNKNOWN);
  init_convert_optab (sfloat_optab, FLOAT);
  init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
  init_convert_optab (lrint_optab, UNKNOWN);
  init_convert_optab (lround_optab, UNKNOWN);
  init_convert_optab (lfloor_optab, UNKNOWN);
  init_convert_optab (lceil_optab, UNKNOWN);

  init_convert_optab (fract_optab, FRACT_CONVERT);
  init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
  init_convert_optab (satfract_optab, SAT_FRACT);
  init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
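  /* Besides clearing the handlers, init_optab also records the reverse
     mapping from rtx code to optab, so that, for example, code_to_optab[PLUS]
     ends up pointing at add_optab; entries registered with UNKNOWN get no
     such mapping.  */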
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
    }
  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
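  /* A libcall name is composed from the basename, the mode name and the
     operand-count suffix recorded below; e.g. add_optab, with basename "add"
     and suffix '3', yields names such as "__addsi3" or "__addsf3" once a
     mode is chosen, and the libcall_gen hook controls for which mode classes
     such a name is actually registered.  */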
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssadd_optab->libcall_basename = "ssadd";
  ssadd_optab->libcall_suffix = '3';
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
  usadd_optab->libcall_basename = "usadd";
  usadd_optab->libcall_suffix = '3';
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  sssub_optab->libcall_basename = "sssub";
  sssub_optab->libcall_suffix = '3';
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
  ussub_optab->libcall_basename = "ussub";
  ussub_optab->libcall_suffix = '3';
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssmul_optab->libcall_basename = "ssmul";
  ssmul_optab->libcall_suffix = '3';
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
  usmul_optab->libcall_basename = "usmul";
  usmul_optab->libcall_suffix = '3';
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  ssdiv_optab->libcall_basename = "ssdiv";
  ssdiv_optab->libcall_suffix = '3';
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  usdiv_optab->libcall_basename = "usdiv";
  usdiv_optab->libcall_suffix = '3';
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
  ssashl_optab->libcall_basename = "ssashl";
  ssashl_optab->libcall_suffix = '3';
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
  usashl_optab->libcall_basename = "usashl";
  usashl_optab->libcall_suffix = '3';
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ssneg_optab->libcall_basename = "ssneg";
  ssneg_optab->libcall_suffix = '2';
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
  usneg_optab->libcall_basename = "usneg";
  usneg_optab->libcall_suffix = '2';
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;
  /* Comparison libcalls for integers MUST come in pairs.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;
  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;
  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* trunc_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
  /* Conversions for fixed-point modes and other modes.  */
  fract_optab->libcall_basename = "fract";
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
  satfract_optab->libcall_basename = "satfract";
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
  fractuns_optab->libcall_basename = "fractuns";
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
  satfractuns_optab->libcall_basename = "satfractuns";
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    {
      int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
      set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
			 "ffs");
    }

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
  set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif

  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
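/* The targetm.init_libfuncs hook runs after all of the defaults above, so a
   port can still rename or remove individual entries at that point; for
   example, the ARM EABI port replaces the default "__divsi3" style names
   with "__aeabi_idiv" and friends from its hook.  */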
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	optab o;
	rtx l;

	o = &optab_table[i];
	l = optab_libfunc (o, j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (o->code),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  convert_optab o;
	  rtx l;

	  o = &convert_optab_table[i];
	  l = convert_optab_libfunc (o, j, k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (o->code),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
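/* This is meant to be called by hand, e.g. from the debugger:

     (gdb) call debug_optab_libfuncs ()

   which dumps every currently registered libcall name to stderr.  */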
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
	       rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (cmp_optab, mode)->insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, auto vectorizer
     ensures that condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for vector cond expr expr in VMODE mode.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
				   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them in reg, if required.  */
  rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit instruction!  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
			      comparison, cc_op0, cc_op1));

  return target;
}
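/* So a tree such as VEC_COND_EXPR <a < b, x, y> expands to a single
   vcond/vcondu pattern whose operands are, in order: the result register,
   the two value vectors X and Y, the comparison built by vector_compare_rtx,
   and the two comparison operands A and B.  */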
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
			       rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;
  emit_insn (insn);

  return target;
}
/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
	break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
	 against an old value.  */
      if (MEM_P (old_val))
	old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
	return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode cmode = insn_data[icode].operand[0].mode;
	  rtx insn;

	  subtarget = target;
	  if (!insn_data[icode].operand[0].predicate (target, cmode))
	    subtarget = gen_reg_rtx (cmode);

	  insn = GEN_FCN (icode) (subtarget);
	  if (insn)
	    {
	      emit_insn (insn);
	      if (GET_MODE (target) != GET_MODE (subtarget))
		convert_move (target, subtarget, 1);
	      return target;
	    }
	}
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget != NULL_RTX)
	{
	  gcc_assert (subtarget == cmp_reg);
	  break;
	}

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget == NULL_RTX)
	return false;
      if (subtarget != cmp_reg)
	emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing || CONST_INT_P (val))
	{
	  icode = sync_add_optab[mode];
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return const0_rtx;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	  || CONST_INT_P (val))
	{
	  old_code = sync_old_add_optab[mode];
	  new_code = sync_new_add_optab[mode];
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target supports the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  compensate = true;
	  icode = old_code;
	}
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  compensate = true;
	  icode = new_code;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);

	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (!after)
		{
		  if (code == PLUS)
		    code = MINUS;
		  else if (code == MINUS)
		    code = PLUS;
		}

	      if (code == NOT)
		{
		  target = expand_simple_unop (mode, NOT, target,
					       NULL_RTX, true);
		  code = AND;
		}
	      target = expand_simple_binop (mode, code, target, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
	emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case, while the return value will be 0/1, the exact value
   stored in MEM is target defined.  TARGET is an optional place to stick
   the return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return target;
	}
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
#include "gt-optabs.h"