/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];
/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
/* Current libcall id.  It doesn't matter what these are, as long
   as they are unique to each libcall that is emitted.  */
static HOST_WIDE_INT libcall_id = 0;

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Info about libfunc.  We use same hashtable for normal optabs and conversion
   optab.  In the first case mode2 is unused.  */
struct libfunc_entry GTY(())
{
  size_t optab;
  enum machine_mode mode1, mode2;
  rtx libfunc;
};

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
/* Used for attribute_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}
/* Used for optab_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed,
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (convert_optab_table[0] - optab);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Return the libfunc corresponding to the operation defined by OPTAB in MODE.
   Trigger lazy initialization if needed, return NULL if no libfunc is
   available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab_table[0] - optab);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
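/* Illustrative note (an editorial example, not part of the original
   sources): if INSNS is a multi-insn sequence whose last insn finally sets
   TARGET to OP0 + OP1, the note attached above is the rtx
   (plus:M (reg OP0) (reg OP1)), which later passes such as CSE may use as
   the known value of TARGET.  */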
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
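/* For example (illustrative only), widening a QImode register to SImode for
   a bitwise AND can take the paradoxical-SUBREG path above: only the low
   8 bits of the result are ever used, so whatever garbage sits in the upper
   bits of the SUBREG is harmless and no explicit zero- or sign-extension
   has to be emitted.  */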
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type)
{
  bool trapv;
  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
        vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING(type))
        return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
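/* For example, a PLUS_EXPR on a signed integral type compiled with -ftrapv
   (so that TYPE_OVERFLOW_TRAPS holds) maps to addv_optab, the same
   expression on an unsigned saturating type maps to usadd_optab, and the
   ordinary case falls back to add_optab, mirroring the switch above.  */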
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -   */

rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_OPERAND_LENGTH (exp);

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
        && xmode1 != VOIDmode)
      xop1 = copy_to_mode_reg (xmode1, xop1);

  if (wide_op)
    if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
        && wxmode != VOIDmode)
      wxop = copy_to_mode_reg (wxmode, wxop);

  if (op1)
    {
      if (wide_op)
        pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
      else
        pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
        pat = GEN_FCN (icode) (temp, xop0, wxop);
      else
        pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}
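/* For example (illustrative only, the exact vector modes depend on the
   target), a WIDEN_SUM_EXPR that sums a V8HI vector into a V4SI accumulator
   reaches this function with nops == 2: OP0 holds the narrow V8HI operand,
   WIDE_OP holds the V4SI accumulator, and OP1 is unused, exactly as laid
   out in the table before the function.  */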
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (optab_handler (ternary_optab, mode)->insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
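/* For instance, a call with two CONST_INT operands such as
   simplify_expand_binop (SImode, add_optab, GEN_INT (2), GEN_INT (3), ...)
   is folded by simplify_binary_operation to the constant 5 and emits no
   insns; only when folding fails does it fall through to expand_binop.  */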
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) optab_handler (shift_optab, mode)->insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_normal (vec_oprnd);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_normal (shift_oprnd);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
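/* A worked example of the decomposition above (illustrative only): with
   BITS_PER_WORD == 8, shifting the double word 0xABCD left by OP1 == 3
   takes OUTOF_INPUT = 0xCD (the low word) and INTO_INPUT = 0xAB.  The
   carries are 0xCD >> (8 - 3) = 0x06, INTO_TARGET becomes
   ((0xAB << 3) & 0xFF) | 0x06 = 0x5E and OUTOF_TARGET becomes
   (0xCD << 3) & 0xFF = 0x68, giving 0x5E68, which is 0xABCD << 3 truncated
   to 16 bits.  */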
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).   Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
         *                    [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                 _______________________
    (2a)        [__op0_low__*__op1_high_]
                 _______________________
    (2b)        [__op0_high_*__op1_low__]
     _______________________
    (3) [__op0_high_*__op1_high_]

  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
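/* A small worked example of the scheme above (illustrative only): with
   4-bit words, op0 = 0xA7 and op1 = 0x35.  The unsigned widening low
   product (1) is 0x7 * 0x5 = 0x23; (2a) keeps only the low word of
   0x7 * 0x3 = 0x15, i.e. 0x5, and (2b) keeps only the low word of
   0xA * 0x5 = 0x32, i.e. 0x2.  The low result word is therefore 0x3 and
   the high result word is 0x2 + 0x5 + 0x2 = 0x9, matching the low half of
   the full product 0xA7 * 0x35 = 0x2293.  */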
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
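/* For example, expand_simple_binop (SImode, PLUS, a, b, NULL_RTX, 0,
   OPTAB_LIB_WIDEN) looks up code_to_optab[PLUS] (add_optab) and forwards
   the call to expand_binop, which may then pick a direct insn, a widening
   sequence or a libcall as appropriate.  */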
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as an operand to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          rtx x, bool unsignedp)
{
  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  int icode = (int) optab_handler (binoptab, mode)->insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode tmp_mode;
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx temp;
  rtx swap;

  if (target)
    temp = target;
  else
    temp = gen_reg_rtx (mode);

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
      && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (xop0) != VOIDmode
                          ? GET_MODE (xop0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (xop1) != VOIDmode
                          ? GET_MODE (xop1)
                          : mode,
                          xop1, unsignedp);

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudos.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        return 0;
    }
  else
    tmp_mode = mode;

  if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
    temp = gen_reg_rtx (tmp_mode);

  pat = GEN_FCN (icode) (temp, xop0, xop1);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return temp;
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }
  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
      && class == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_BITSIZE (mode);

      if (GET_CODE (op1) == CONST_INT)
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (mode, op1);
      else
        newop1 = expand_binop (mode, sub_optab,
                               GEN_INT (bits), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
                          GET_MODE_WIDER_MODE (mode))->insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && ((optab_handler ((unsignedp ? umul_widen_optab
                                     : smul_widen_optab),
                                    GET_MODE_WIDER_MODE (wider_mode))->insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    {
      temp = op1;
      op1 = op0;
      op0 = temp;
    }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
    {
      int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
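      /* An illustrative sketch, not part of GCC: the same word-at-a-time
	 scheme on host integers, for a 64-bit add built from 32-bit words.
	 The carry out of the low word is recovered with an unsigned
	 comparison, which is what emit_store_flag_force produces below
	 (the helper name is hypothetical):

	   static void
	   add64_by_words (unsigned int a_hi, unsigned int a_lo,
			   unsigned int b_hi, unsigned int b_lo,
			   unsigned int *r_hi, unsigned int *r_lo)
	   {
	     unsigned int lo = a_lo + b_lo;
	     unsigned int carry = lo < a_lo;
	     *r_lo = lo;
	     *r_hi = a_hi + b_hi + carry;
	   }  */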
1952 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1953 int normalizep
= STORE_FLAG_VALUE
;
1958 /* Prepare the operands. */
1959 xop0
= force_reg (mode
, op0
);
1960 xop1
= force_reg (mode
, op1
);
1962 xtarget
= gen_reg_rtx (mode
);
1964 if (target
== 0 || !REG_P (target
))
1967 /* Indicate for flow that the entire target reg is being set. */
1969 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1971 /* Do the actual arithmetic. */
1972 for (i
= 0; i
< nwords
; i
++)
1974 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1975 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1976 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1977 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1980 /* Main add/subtract of the input operands. */
1981 x
= expand_binop (word_mode
, binoptab
,
1982 op0_piece
, op1_piece
,
1983 target_piece
, unsignedp
, next_methods
);
1989 /* Store carry from main add/subtract. */
1990 carry_out
= gen_reg_rtx (word_mode
);
1991 carry_out
= emit_store_flag_force (carry_out
,
1992 (binoptab
== add_optab
1995 word_mode
, 1, normalizep
);
2002 /* Add/subtract previous carry to main result. */
2003 newx
= expand_binop (word_mode
,
2004 normalizep
== 1 ? binoptab
: otheroptab
,
2006 NULL_RTX
, 1, next_methods
);
2010 /* Get out carry from adding/subtracting carry in. */
2011 rtx carry_tmp
= gen_reg_rtx (word_mode
);
2012 carry_tmp
= emit_store_flag_force (carry_tmp
,
2013 (binoptab
== add_optab
2016 word_mode
, 1, normalizep
);
2018 /* Logical-ior the two poss. carry together. */
2019 carry_out
= expand_binop (word_mode
, ior_optab
,
2020 carry_out
, carry_tmp
,
2021 carry_out
, 0, next_methods
);
2025 emit_move_insn (target_piece
, newx
);
2029 if (x
!= target_piece
)
2030 emit_move_insn (target_piece
, x
);
2033 carry_in
= carry_out
;
2036 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
2038 if (optab_handler (mov_optab
, mode
)->insn_code
!= CODE_FOR_nothing
2039 || ! rtx_equal_p (target
, xtarget
))
2041 rtx temp
= emit_move_insn (target
, xtarget
);
2043 set_unique_reg_note (temp
,
2045 gen_rtx_fmt_ee (binoptab
->code
, mode
,
2056 delete_insns_since (last
);
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */
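  /* An illustrative sketch, not part of GCC: the identity behind
     expand_doubleword_mult on host integers, computing the low 64 bits of a
     product from 32-bit words.  Only one widening multiply of the two low
     words is needed; the other partial products only contribute to the high
     word (variable names here are hypothetical):

       unsigned long long u = (unsigned long long) a_lo * b_lo;
       unsigned int r_hi = (unsigned int) (u >> 32) + a_lo * b_hi + a_hi * b_lo;
       unsigned int r_lo = (unsigned int) u;  */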
2064 if (binoptab
== smul_optab
2065 && class == MODE_INT
2066 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2067 && optab_handler (smul_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
2068 && optab_handler (add_optab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
2070 rtx product
= NULL_RTX
;
2072 if (optab_handler (umul_widen_optab
, mode
)->insn_code
2073 != CODE_FOR_nothing
)
2075 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2078 delete_insns_since (last
);
2081 if (product
== NULL_RTX
2082 && optab_handler (smul_widen_optab
, mode
)->insn_code
2083 != CODE_FOR_nothing
)
2085 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2088 delete_insns_since (last
);
2091 if (product
!= NULL_RTX
)
2093 if (optab_handler (mov_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
2095 temp
= emit_move_insn (target
? target
: product
, product
);
2096 set_unique_reg_note (temp
,
2098 gen_rtx_fmt_ee (MULT
, mode
,
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */
2109 libfunc
= optab_libfunc (binoptab
, mode
);
2111 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
2115 enum machine_mode op1_mode
= mode
;
2120 if (shift_optab_p (binoptab
))
2122 op1_mode
= targetm
.libgcc_shift_count_mode ();
2123 /* Specify unsigned here,
2124 since negative shift counts are meaningless. */
2125 op1x
= convert_to_mode (op1_mode
, op1
, 1);
2128 if (GET_MODE (op0
) != VOIDmode
2129 && GET_MODE (op0
) != mode
)
2130 op0
= convert_to_mode (mode
, op0
, unsignedp
);
2132 /* Pass 1 for NO_QUEUE so we don't lose any increments
2133 if the libcall is cse'd or moved. */
2134 value
= emit_library_call_value (libfunc
,
2135 NULL_RTX
, LCT_CONST
, mode
, 2,
2136 op0
, mode
, op1x
, op1_mode
);
2138 insns
= get_insns ();
2141 target
= gen_reg_rtx (mode
);
2142 emit_libcall_block (insns
, target
, value
,
2143 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
2148 delete_insns_since (last
);
2150 /* It can't be done in this mode. Can we do it in a wider mode? */
2152 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
2153 || methods
== OPTAB_MUST_WIDEN
))
2155 /* Caller says, don't even try. */
2156 delete_insns_since (entry_last
);
2160 /* Compute the value of METHODS to pass to recursive calls.
2161 Don't allow widening to be tried recursively. */
2163 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
2165 /* Look for a wider mode of the same class for which it appears we can do
2168 if (CLASS_HAS_WIDER_MODES_P (class))
2170 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2171 wider_mode
!= VOIDmode
;
2172 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2174 if ((optab_handler (binoptab
, wider_mode
)->insn_code
2175 != CODE_FOR_nothing
)
2176 || (methods
== OPTAB_LIB
2177 && optab_libfunc (binoptab
, wider_mode
)))
2179 rtx xop0
= op0
, xop1
= op1
;
2182 /* For certain integer operations, we need not actually extend
2183 the narrow operands, as long as we will truncate
2184 the results to the same narrowness. */
2186 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2187 || binoptab
== xor_optab
2188 || binoptab
== add_optab
|| binoptab
== sub_optab
2189 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2190 && class == MODE_INT
)
2193 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2194 unsignedp
, no_extend
);
2196 /* The second operand of a shift must always be extended. */
2197 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2198 no_extend
&& binoptab
!= ashl_optab
);
2200 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2201 unsignedp
, methods
);
2204 if (class != MODE_INT
2205 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2206 GET_MODE_BITSIZE (wider_mode
)))
2209 target
= gen_reg_rtx (mode
);
2210 convert_move (target
, temp
, 0);
2214 return gen_lowpart (mode
, temp
);
2217 delete_insns_since (last
);
2222 delete_insns_since (entry_last
);
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
2234 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2235 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2236 enum optab_methods methods
)
2239 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2240 struct optab wide_soptab
;
2242 /* Do it without widening, if possible. */
2243 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2244 unsignedp
, OPTAB_DIRECT
);
2245 if (temp
|| methods
== OPTAB_DIRECT
)
2248 /* Try widening to a signed int. Make a fake signed optab that
2249 hides any signed insn for direct use. */
2250 wide_soptab
= *soptab
;
2251 optab_handler (&wide_soptab
, mode
)->insn_code
= CODE_FOR_nothing
;
2252 /* We don't want to generate new hash table entries from this fake
2254 wide_soptab
.libcall_gen
= NULL
;
2256 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2257 unsignedp
, OPTAB_WIDEN
);
2259 /* For unsigned operands, try widening to an unsigned int. */
2260 if (temp
== 0 && unsignedp
)
2261 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2262 unsignedp
, OPTAB_WIDEN
);
2263 if (temp
|| methods
== OPTAB_WIDEN
)
2266 /* Use the right width lib call if that exists. */
2267 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2268 if (temp
|| methods
== OPTAB_LIB
)
2271 /* Must widen and use a lib call, use either signed or unsigned. */
2272 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2273 unsignedp
, methods
);
2277 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2278 unsignedp
, methods
);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
2294 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2297 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2298 enum mode_class
class;
2299 enum machine_mode wider_mode
;
2300 rtx entry_last
= get_last_insn ();
2303 class = GET_MODE_CLASS (mode
);
2306 targ0
= gen_reg_rtx (mode
);
2308 targ1
= gen_reg_rtx (mode
);
2310 /* Record where to go back to if we fail. */
2311 last
= get_last_insn ();
2313 if (optab_handler (unoptab
, mode
)->insn_code
!= CODE_FOR_nothing
)
2315 int icode
= (int) optab_handler (unoptab
, mode
)->insn_code
;
2316 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
2320 if (GET_MODE (xop0
) != VOIDmode
2321 && GET_MODE (xop0
) != mode0
)
2322 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2324 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2325 if (!insn_data
[icode
].operand
[2].predicate (xop0
, mode0
))
2326 xop0
= copy_to_mode_reg (mode0
, xop0
);
2328 /* We could handle this, but we should always be called with a pseudo
2329 for our targets and all insns should take them as outputs. */
2330 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2331 gcc_assert (insn_data
[icode
].operand
[1].predicate (targ1
, mode
));
2333 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
2340 delete_insns_since (last
);
2343 /* It can't be done in this mode. Can we do it in a wider mode? */
2345 if (CLASS_HAS_WIDER_MODES_P (class))
2347 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2348 wider_mode
!= VOIDmode
;
2349 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2351 if (optab_handler (unoptab
, wider_mode
)->insn_code
2352 != CODE_FOR_nothing
)
2354 rtx t0
= gen_reg_rtx (wider_mode
);
2355 rtx t1
= gen_reg_rtx (wider_mode
);
2356 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2358 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2360 convert_move (targ0
, t0
, unsignedp
);
2361 convert_move (targ1
, t1
, unsignedp
);
2365 delete_insns_since (last
);
2370 delete_insns_since (entry_last
);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
2387 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2390 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2391 enum mode_class
class;
2392 enum machine_mode wider_mode
;
2393 rtx entry_last
= get_last_insn ();
2396 class = GET_MODE_CLASS (mode
);
2399 targ0
= gen_reg_rtx (mode
);
2401 targ1
= gen_reg_rtx (mode
);
2403 /* Record where to go back to if we fail. */
2404 last
= get_last_insn ();
2406 if (optab_handler (binoptab
, mode
)->insn_code
!= CODE_FOR_nothing
)
2408 int icode
= (int) optab_handler (binoptab
, mode
)->insn_code
;
2409 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2410 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2412 rtx xop0
= op0
, xop1
= op1
;
2414 /* If we are optimizing, force expensive constants into a register. */
2415 xop0
= avoid_expensive_constant (mode0
, binoptab
, xop0
, unsignedp
);
2416 xop1
= avoid_expensive_constant (mode1
, binoptab
, xop1
, unsignedp
);
2418 /* In case the insn wants input operands in modes different from
2419 those of the actual operands, convert the operands. It would
2420 seem that we don't need to convert CONST_INTs, but we do, so
2421 that they're properly zero-extended, sign-extended or truncated
2424 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2425 xop0
= convert_modes (mode0
,
2426 GET_MODE (op0
) != VOIDmode
2431 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2432 xop1
= convert_modes (mode1
,
2433 GET_MODE (op1
) != VOIDmode
2438 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2439 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2440 xop0
= copy_to_mode_reg (mode0
, xop0
);
2442 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
))
2443 xop1
= copy_to_mode_reg (mode1
, xop1
);
2445 /* We could handle this, but we should always be called with a pseudo
2446 for our targets and all insns should take them as outputs. */
2447 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2448 gcc_assert (insn_data
[icode
].operand
[3].predicate (targ1
, mode
));
2450 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2457 delete_insns_since (last
);
2460 /* It can't be done in this mode. Can we do it in a wider mode? */
2462 if (CLASS_HAS_WIDER_MODES_P (class))
2464 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2465 wider_mode
!= VOIDmode
;
2466 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2468 if (optab_handler (binoptab
, wider_mode
)->insn_code
2469 != CODE_FOR_nothing
)
2471 rtx t0
= gen_reg_rtx (wider_mode
);
2472 rtx t1
= gen_reg_rtx (wider_mode
);
2473 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2474 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2476 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2479 convert_move (targ0
, t0
, unsignedp
);
2480 convert_move (targ1
, t1
, unsignedp
);
2484 delete_insns_since (last
);
2489 delete_insns_since (entry_last
);
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
2503 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2504 rtx targ0
, rtx targ1
, enum rtx_code code
)
2506 enum machine_mode mode
;
2507 enum machine_mode libval_mode
;
2512 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2513 gcc_assert (!targ0
!= !targ1
);
2515 mode
= GET_MODE (op0
);
2516 libfunc
= optab_libfunc (binoptab
, mode
);
2520 /* The value returned by the library function will have twice as
2521 many bits as the nominal MODE. */
2522 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2525 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2529 /* Get the part of VAL containing the value that we want. */
2530 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2531 targ0
? 0 : GET_MODE_SIZE (mode
));
2532 insns
= get_insns ();
  /* Move the result into the desired location.  */
2535 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2536 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
2546 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2547 rtx target
, int unsignedp
)
2549 optab unop
= code_to_optab
[(int) code
];
2552 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (clz_optab, wider_mode)->insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
2599 expand_doubleword_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2601 rtx xop0
= force_reg (mode
, op0
);
2602 rtx subhi
= gen_highpart (word_mode
, xop0
);
2603 rtx sublo
= gen_lowpart (word_mode
, xop0
);
2604 rtx hi0_label
= gen_label_rtx ();
2605 rtx after_label
= gen_label_rtx ();
2606 rtx seq
, temp
, result
;
  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (target == 0)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
2617 result
= gen_reg_rtx (word_mode
);
2621 /* If the high word is not equal to zero,
2622 then clz of the full value is clz of the high word. */
2623 emit_cmp_and_jump_insns (subhi
, CONST0_RTX (word_mode
), EQ
, 0,
2624 word_mode
, true, hi0_label
);
2626 temp
= expand_unop_direct (word_mode
, clz_optab
, subhi
, result
, true);
2631 convert_move (result
, temp
, true);
2633 emit_jump_insn (gen_jump (after_label
));
2636 /* Else clz of the full value is clz of the low word plus the number
2637 of bits in the high word. */
2638 emit_label (hi0_label
);
2640 temp
= expand_unop_direct (word_mode
, clz_optab
, sublo
, 0, true);
2643 temp
= expand_binop (word_mode
, add_optab
, temp
,
2644 GEN_INT (GET_MODE_BITSIZE (word_mode
)),
2645 result
, true, OPTAB_DIRECT
);
2649 convert_move (result
, temp
, true);
2651 emit_label (after_label
);
2652 convert_move (target
, result
, true);
2657 add_equal_note (seq
, target
, CLZ
, xop0
, 0);
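  /* An illustrative sketch, not part of GCC: the same branchy scheme on
     host integers, computing a 64-bit clz from two 32-bit halves (the
     helper name is hypothetical; assumes __builtin_clz and a nonzero
     value):

       static int
       example_clz64 (unsigned int hi, unsigned int lo)
       {
	 if (hi != 0)
	   return __builtin_clz (hi);
	 return 32 + __builtin_clz (lo);
       }  */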
2669 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2671 widen_bswap (enum machine_mode mode
, rtx op0
, rtx target
)
2673 enum mode_class
class = GET_MODE_CLASS (mode
);
2674 enum machine_mode wider_mode
;
2677 if (!CLASS_HAS_WIDER_MODES_P (class))
2680 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2681 wider_mode
!= VOIDmode
;
2682 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2683 if (optab_handler (bswap_optab
, wider_mode
)->insn_code
!= CODE_FOR_nothing
)
2688 last
= get_last_insn ();
2690 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2691 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2694 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2695 size_int (GET_MODE_BITSIZE (wider_mode
)
2696 - GET_MODE_BITSIZE (mode
)),
2702 target
= gen_reg_rtx (mode
);
2703 emit_move_insn (target
, gen_lowpart (mode
, x
));
2706 delete_insns_since (last
);
2711 /* Try calculating bswap as two bswaps of two word-sized operands. */
2714 expand_doubleword_bswap (enum machine_mode mode
, rtx op
, rtx target
)
2718 t1
= expand_unop (word_mode
, bswap_optab
,
2719 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2720 t0
= expand_unop (word_mode
, bswap_optab
,
2721 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2724 target
= gen_reg_rtx (mode
);
2726 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2727 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2728 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
2736 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2738 enum mode_class
class = GET_MODE_CLASS (mode
);
2739 if (CLASS_HAS_WIDER_MODES_P (class))
2741 enum machine_mode wider_mode
;
2742 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2743 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2745 if (optab_handler (popcount_optab
, wider_mode
)->insn_code
2746 != CODE_FOR_nothing
)
2748 rtx xop0
, temp
, last
;
2750 last
= get_last_insn ();
2753 target
= gen_reg_rtx (mode
);
2754 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2755 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2758 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2759 target
, true, OPTAB_DIRECT
);
2761 delete_insns_since (last
);
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_BITSIZE(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
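/* A worked instance of the identity, added for illustration and not part of
   the original comment: for 32-bit x = 12 = 0b1100, x & -x = 4 isolates the
   lowest set bit; clz (4) = 29 and K - 29 = 31 - 29 = 2, which is ctz (12).  */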
2783 expand_ctz (enum machine_mode mode
, rtx op0
, rtx target
)
2787 if (optab_handler (clz_optab
, mode
)->insn_code
== CODE_FOR_nothing
)
2792 temp
= expand_unop_direct (mode
, neg_optab
, op0
, NULL_RTX
, true);
2794 temp
= expand_binop (mode
, and_optab
, op0
, temp
, NULL_RTX
,
2795 true, OPTAB_DIRECT
);
2797 temp
= expand_unop_direct (mode
, clz_optab
, temp
, NULL_RTX
, true);
2799 temp
= expand_binop (mode
, sub_optab
, GEN_INT (GET_MODE_BITSIZE (mode
) - 1),
2801 true, OPTAB_DIRECT
);
2811 add_equal_note (seq
, temp
, CTZ
, op0
, 0);
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_ctz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
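/* Added for illustration, not part of the original comment: ffs returns the
   1-based index of the least significant set bit, so for nonzero x it is
   just ctz (x) + 1 (e.g. ffs (12) = 2 + 1 = 3), while ffs (0) must be 0,
   which is why the zero case may need the test and branch mentioned above.  */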
2824 expand_ffs (enum machine_mode mode
, rtx op0
, rtx target
)
2826 HOST_WIDE_INT val
= 0;
2827 bool defined_at_zero
= false;
2830 if (optab_handler (ctz_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
2834 temp
= expand_unop_direct (mode
, ctz_optab
, op0
, 0, true);
2838 defined_at_zero
= (CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2);
2840 else if (optab_handler (clz_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
2843 temp
= expand_ctz (mode
, op0
, 0);
2847 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2)
2849 defined_at_zero
= true;
2850 val
= (GET_MODE_BITSIZE (mode
) - 1) - val
;
2856 if (defined_at_zero
&& val
== -1)
2857 /* No correction needed at zero. */;
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */
2870 rtx nonzero_label
= gen_label_rtx ();
2871 emit_cmp_and_jump_insns (op0
, CONST0_RTX (mode
), NE
, 0,
2872 mode
, true, nonzero_label
);
2874 convert_move (temp
, GEN_INT (-1), false);
2875 emit_label (nonzero_label
);
2878 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2879 to produce a value in the range 0..bitsize. */
2880 temp
= expand_binop (mode
, add_optab
, temp
, GEN_INT (1),
2881 target
, false, OPTAB_DIRECT
);
2888 add_equal_note (seq
, temp
, FFS
, op0
, 0);
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */
2903 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2904 enum machine_mode imode
)
2907 ret
= lowpart_subreg (omode
, val
, imode
);
2910 val
= force_reg (imode
, val
);
2911 ret
= lowpart_subreg (omode
, val
, imode
);
2912 gcc_assert (ret
!= NULL
);
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
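/* An illustrative sketch, not part of GCC: the same trick on a host IEEE
   single precision value viewed as a 32-bit integer whose sign bit is
   0x80000000 (type punning via memcpy is assumed; "f" and "bits" are
   hypothetical names):

     unsigned int bits;
     memcpy (&bits, &f, sizeof bits);
     bits &= ~0x80000000u;	(ABS: clear the sign bit)
     bits ^= 0x80000000u;	(NEG: flip the sign bit)
     memcpy (&f, &bits, sizeof f);  */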
2921 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2922 rtx op0
, rtx target
)
2924 const struct real_format
*fmt
;
2925 int bitpos
, word
, nwords
, i
;
2926 enum machine_mode imode
;
2927 HOST_WIDE_INT hi
, lo
;
2930 /* The format has to have a simple sign bit. */
2931 fmt
= REAL_MODE_FORMAT (mode
);
2935 bitpos
= fmt
->signbit_rw
;
2939 /* Don't create negative zeros if the format doesn't support them. */
2940 if (code
== NEG
&& !fmt
->has_signed_zero
)
2943 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2945 imode
= int_mode_for_mode (mode
);
2946 if (imode
== BLKmode
)
2955 if (FLOAT_WORDS_BIG_ENDIAN
)
2956 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2958 word
= bitpos
/ BITS_PER_WORD
;
2959 bitpos
= bitpos
% BITS_PER_WORD
;
2960 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2963 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2966 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2970 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2976 if (target
== 0 || target
== op0
)
2977 target
= gen_reg_rtx (mode
);
2983 for (i
= 0; i
< nwords
; ++i
)
2985 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2986 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2990 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2992 immed_double_const (lo
, hi
, imode
),
2993 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2994 if (temp
!= targ_piece
)
2995 emit_move_insn (targ_piece
, temp
);
2998 emit_move_insn (targ_piece
, op0_piece
);
3001 insns
= get_insns ();
3004 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
3005 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
3009 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3010 gen_lowpart (imode
, op0
),
3011 immed_double_const (lo
, hi
, imode
),
3012 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3013 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3015 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
3016 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
3025 expand_unop_direct (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3028 if (optab_handler (unoptab
, mode
)->insn_code
!= CODE_FOR_nothing
)
3030 int icode
= (int) optab_handler (unoptab
, mode
)->insn_code
;
3031 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3033 rtx last
= get_last_insn ();
3039 temp
= gen_reg_rtx (mode
);
3041 if (GET_MODE (xop0
) != VOIDmode
3042 && GET_MODE (xop0
) != mode0
)
3043 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
3045 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3047 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
3048 xop0
= copy_to_mode_reg (mode0
, xop0
);
3050 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
3051 temp
= gen_reg_rtx (mode
);
3053 pat
= GEN_FCN (icode
) (temp
, xop0
);
3056 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3057 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
3059 delete_insns_since (last
);
3060 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
3068 delete_insns_since (last
);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
3085 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3088 enum mode_class
class = GET_MODE_CLASS (mode
);
3089 enum machine_mode wider_mode
;
3093 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3097 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3099 /* Widening (or narrowing) clz needs special treatment. */
3100 if (unoptab
== clz_optab
)
3102 temp
= widen_clz (mode
, op0
, target
);
3106 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3107 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3109 temp
= expand_doubleword_clz (mode
, op0
, target
);
3117 /* Widening (or narrowing) bswap needs special treatment. */
3118 if (unoptab
== bswap_optab
)
3120 temp
= widen_bswap (mode
, op0
, target
);
3124 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3125 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3127 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3135 if (CLASS_HAS_WIDER_MODES_P (class))
3136 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3137 wider_mode
!= VOIDmode
;
3138 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3140 if (optab_handler (unoptab
, wider_mode
)->insn_code
!= CODE_FOR_nothing
)
3143 rtx last
= get_last_insn ();
3145 /* For certain operations, we need not actually extend
3146 the narrow operand, as long as we will truncate the
3147 results to the same narrowness. */
3149 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3150 (unoptab
== neg_optab
3151 || unoptab
== one_cmpl_optab
)
3152 && class == MODE_INT
);
3154 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3159 if (class != MODE_INT
3160 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
3161 GET_MODE_BITSIZE (wider_mode
)))
3164 target
= gen_reg_rtx (mode
);
3165 convert_move (target
, temp
, 0);
3169 return gen_lowpart (mode
, temp
);
3172 delete_insns_since (last
);
3176 /* These can be done a word at a time. */
3177 if (unoptab
== one_cmpl_optab
3178 && class == MODE_INT
3179 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3180 && optab_handler (unoptab
, word_mode
)->insn_code
!= CODE_FOR_nothing
)
3185 if (target
== 0 || target
== op0
)
3186 target
= gen_reg_rtx (mode
);
3190 /* Do the actual arithmetic. */
3191 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
3193 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
3194 rtx x
= expand_unop (word_mode
, unoptab
,
3195 operand_subword_force (op0
, i
, mode
),
3196 target_piece
, unsignedp
);
3198 if (target_piece
!= x
)
3199 emit_move_insn (target_piece
, x
);
3202 insns
= get_insns ();
3205 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
3206 gen_rtx_fmt_e (unoptab
->code
, mode
,
3211 if (unoptab
->code
== NEG
)
3213 /* Try negating floating point values by flipping the sign bit. */
3214 if (SCALAR_FLOAT_MODE_P (mode
))
3216 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
3221 /* If there is no negation pattern, and we have no negative zero,
3222 try subtracting from zero. */
3223 if (!HONOR_SIGNED_ZEROS (mode
))
3225 temp
= expand_binop (mode
, (unoptab
== negv_optab
3226 ? subv_optab
: sub_optab
),
3227 CONST0_RTX (mode
), op0
, target
,
3228 unsignedp
, OPTAB_DIRECT
);
3234 /* Try calculating parity (x) as popcount (x) % 2. */
3235 if (unoptab
== parity_optab
)
3237 temp
= expand_parity (mode
, op0
, target
);
3242 /* Try implementing ffs (x) in terms of clz (x). */
3243 if (unoptab
== ffs_optab
)
3245 temp
= expand_ffs (mode
, op0
, target
);
3250 /* Try implementing ctz (x) in terms of clz (x). */
3251 if (unoptab
== ctz_optab
)
3253 temp
= expand_ctz (mode
, op0
, target
);
3259 /* Now try a library call in this mode. */
3260 libfunc
= optab_libfunc (unoptab
, mode
);
3265 enum machine_mode outmode
= mode
;
3267 /* All of these functions return small values. Thus we choose to
3268 have them return something that isn't a double-word. */
3269 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3270 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
3272 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
3276 /* Pass 1 for NO_QUEUE so we don't lose any increments
3277 if the libcall is cse'd or moved. */
3278 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3280 insns
= get_insns ();
3283 target
= gen_reg_rtx (outmode
);
3284 emit_libcall_block (insns
, target
, value
,
3285 gen_rtx_fmt_e (unoptab
->code
, outmode
, op0
));
3290 /* It can't be done in this mode. Can we do it in a wider mode? */
3292 if (CLASS_HAS_WIDER_MODES_P (class))
3294 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3295 wider_mode
!= VOIDmode
;
3296 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3298 if ((optab_handler (unoptab
, wider_mode
)->insn_code
3299 != CODE_FOR_nothing
)
3300 || optab_libfunc (unoptab
, wider_mode
))
3303 rtx last
= get_last_insn ();
3305 /* For certain operations, we need not actually extend
3306 the narrow operand, as long as we will truncate the
3307 results to the same narrowness. */
3309 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3310 (unoptab
== neg_optab
3311 || unoptab
== one_cmpl_optab
)
3312 && class == MODE_INT
);
3314 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3317 /* If we are generating clz using wider mode, adjust the
3319 if (unoptab
== clz_optab
&& temp
!= 0)
3320 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
3321 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
3322 - GET_MODE_BITSIZE (mode
)),
3323 target
, true, OPTAB_DIRECT
);
3327 if (class != MODE_INT
)
3330 target
= gen_reg_rtx (mode
);
3331 convert_move (target
, temp
, 0);
3335 return gen_lowpart (mode
, temp
);
3338 delete_insns_since (last
);
3343 /* One final attempt at implementing negation via subtraction,
3344 this time allowing widening of the operand. */
3345 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3348 temp
= expand_binop (mode
,
3349 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3350 CONST0_RTX (mode
), op0
,
3351 target
, unsignedp
, OPTAB_LIB_WIDEN
);
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
3369 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
3370 int result_unsignedp
)
3375 result_unsignedp
= 1;
3377 /* First try to do it with a special abs instruction. */
3378 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3383 /* For floating point modes, try clearing the sign bit. */
3384 if (SCALAR_FLOAT_MODE_P (mode
))
3386 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3391 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3392 if (optab_handler (smax_optab
, mode
)->insn_code
!= CODE_FOR_nothing
3393 && !HONOR_SIGNED_ZEROS (mode
))
3395 rtx last
= get_last_insn ();
3397 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
3399 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3405 delete_insns_since (last
);
  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
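  /* A worked example added for illustration, not part of the original
     comment: for 32-bit x = -5 the arithmetic shift gives s = x >> 31 = -1
     (all ones), (x ^ s) = 4, and 4 - s = 5; for x = 5 the shift gives s = 0
     and the result is 5 - 0 = 5, so the expression computes |x| without a
     branch.  */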
3412 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
3414 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3415 size_int (GET_MODE_BITSIZE (mode
) - 1),
3418 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3421 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3422 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3432 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3433 int result_unsignedp
, int safe
)
3438 result_unsignedp
= 1;
3440 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3444 /* If that does not win, use conditional jump and negate. */
3446 /* It is safe to use the target if it is the same
3447 as the source if this is also a pseudo register */
3448 if (op0
== target
&& REG_P (op0
)
3449 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3452 op1
= gen_label_rtx ();
3453 if (target
== 0 || ! safe
3454 || GET_MODE (target
) != mode
3455 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3457 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3458 target
= gen_reg_rtx (mode
);
3460 emit_move_insn (target
, op0
);
3463 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3464 NULL_RTX
, NULL_RTX
, op1
);
3466 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3469 emit_move_insn (target
, op0
);
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */
3481 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3482 int bitpos
, bool op0_is_abs
)
3484 enum machine_mode imode
;
  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
3493 icode
= (int) signbit_optab
->handlers
[(int) mode
].insn_code
;
3494 if (icode
!= CODE_FOR_nothing
)
3496 imode
= insn_data
[icode
].operand
[0].mode
;
3497 sign
= gen_reg_rtx (imode
);
3498 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3502 HOST_WIDE_INT hi
, lo
;
3504 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3506 imode
= int_mode_for_mode (mode
);
3507 if (imode
== BLKmode
)
3509 op1
= gen_lowpart (imode
, op1
);
3516 if (FLOAT_WORDS_BIG_ENDIAN
)
3517 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3519 word
= bitpos
/ BITS_PER_WORD
;
3520 bitpos
= bitpos
% BITS_PER_WORD
;
3521 op1
= operand_subword_force (op1
, word
, mode
);
3524 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3527 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3531 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3535 sign
= gen_reg_rtx (imode
);
3536 sign
= expand_binop (imode
, and_optab
, op1
,
3537 immed_double_const (lo
, hi
, imode
),
3538 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3543 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3550 if (target
== NULL_RTX
)
3551 target
= copy_to_reg (op0
);
3553 emit_move_insn (target
, op0
);
3556 label
= gen_label_rtx ();
3557 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3559 if (GET_CODE (op0
) == CONST_DOUBLE
)
3560 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3562 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3564 emit_move_insn (target
, op0
);
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
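/* An illustrative sketch, not part of GCC: the mask form of copysign on a
   host IEEE single viewed as 32-bit integers, with the sign bit at
   position 31 (variable names are hypothetical):

     unsigned int mask = 0x80000000u;
     result_bits = (op0_bits & ~mask) | (op1_bits & mask);  */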
3577 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3578 int bitpos
, bool op0_is_abs
)
3580 enum machine_mode imode
;
3581 HOST_WIDE_INT hi
, lo
;
3582 int word
, nwords
, i
;
3585 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3587 imode
= int_mode_for_mode (mode
);
3588 if (imode
== BLKmode
)
3597 if (FLOAT_WORDS_BIG_ENDIAN
)
3598 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3600 word
= bitpos
/ BITS_PER_WORD
;
3601 bitpos
= bitpos
% BITS_PER_WORD
;
3602 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3605 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3608 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3612 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3616 if (target
== 0 || target
== op0
|| target
== op1
)
3617 target
= gen_reg_rtx (mode
);
3623 for (i
= 0; i
< nwords
; ++i
)
3625 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3626 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3631 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
3632 immed_double_const (~lo
, ~hi
, imode
),
3633 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3635 op1
= expand_binop (imode
, and_optab
,
3636 operand_subword_force (op1
, i
, mode
),
3637 immed_double_const (lo
, hi
, imode
),
3638 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3640 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3641 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3642 if (temp
!= targ_piece
)
3643 emit_move_insn (targ_piece
, temp
);
3646 emit_move_insn (targ_piece
, op0_piece
);
3649 insns
= get_insns ();
3652 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
3656 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3657 immed_double_const (lo
, hi
, imode
),
3658 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3660 op0
= gen_lowpart (imode
, op0
);
3662 op0
= expand_binop (imode
, and_optab
, op0
,
3663 immed_double_const (~lo
, ~hi
, imode
),
3664 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3666 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3667 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3668 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */
3679 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3681 enum machine_mode mode
= GET_MODE (op0
);
3682 const struct real_format
*fmt
;
3686 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3687 gcc_assert (GET_MODE (op1
) == mode
);
3689 /* First try to do it with a special instruction. */
3690 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3691 target
, 0, OPTAB_DIRECT
);
3695 fmt
= REAL_MODE_FORMAT (mode
);
3696 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3700 if (GET_CODE (op0
) == CONST_DOUBLE
)
3702 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3703 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3707 if (fmt
->signbit_ro
>= 0
3708 && (GET_CODE (op0
) == CONST_DOUBLE
3709 || (optab_handler (neg_optab
, mode
)->insn_code
!= CODE_FOR_nothing
3710 && optab_handler (abs_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)))
3712 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3713 fmt
->signbit_ro
, op0_is_abs
);
3718 if (fmt
->signbit_rw
< 0)
3720 return expand_copysign_bit (mode
, op0
, op1
, target
,
3721 fmt
->signbit_rw
, op0_is_abs
);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */
3731 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
3734 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3739 /* Now, if insn does not accept our operands, put them into pseudos. */
3741 if (!insn_data
[icode
].operand
[1].predicate (op0
, mode0
))
3742 op0
= copy_to_mode_reg (mode0
, op0
);
3744 if (!insn_data
[icode
].operand
[0].predicate (temp
, GET_MODE (temp
)))
3745 temp
= gen_reg_rtx (GET_MODE (temp
));
3747 pat
= GEN_FCN (icode
) (temp
, op0
);
3749 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3750 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3755 emit_move_insn (target
, temp
);
3758 struct no_conflict_data
3760 rtx target
, first
, insn
;
/* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
   Set P->must_stay if the currently examined clobber / store has to stay
   in the list of insns that constitute the actual no_conflict block /
   libcall block.  */
3769 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3771 struct no_conflict_data
*p
= p0
;
  /* If this insn directly contributes to setting the target, it must stay.  */
3774 if (reg_overlap_mentioned_p (p
->target
, dest
))
3775 p
->must_stay
= true;
3776 /* If we haven't committed to keeping any other insns in the list yet,
3777 there is nothing more to check. */
3778 else if (p
->insn
== p
->first
)
3780 /* If this insn sets / clobbers a register that feeds one of the insns
3781 already in the list, this insn has to stay too. */
3782 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3783 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3784 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3785 /* Likewise if this insn depends on a register set by a previous
3786 insn in the list, or if it sets a result (presumably a hard
3787 register) that is set or clobbered by a previous insn.
3788 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3789 SET_DEST perform the former check on the address, and the latter
3790 check on the MEM. */
3791 || (GET_CODE (set
) == SET
3792 && (modified_in_p (SET_SRC (set
), p
->first
)
3793 || modified_in_p (SET_DEST (set
), p
->first
)
3794 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3795 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3796 p
->must_stay
= true;
/* Encapsulate the block starting at FIRST and ending with LAST, which is
   logically equivalent to EQUIV, so it gets manipulated as a unit if it
   is possible to do so.  */
3804 maybe_encapsulate_block (rtx first
, rtx last
, rtx equiv
)
3806 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3808 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3809 encapsulated region would not be in one basic block, i.e. when
3810 there is a control_flow_insn_p insn between FIRST and LAST. */
3811 bool attach_libcall_retval_notes
= true;
3812 rtx insn
, next
= NEXT_INSN (last
);
3814 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3815 if (control_flow_insn_p (insn
))
3817 attach_libcall_retval_notes
= false;
3821 if (attach_libcall_retval_notes
)
3823 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3825 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3827 next
= NEXT_INSN (last
);
3828 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3829 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID
,
3830 GEN_INT (libcall_id
),
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
3866 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3868 rtx prev
, next
, first
, last
, insn
;
3870 if (!REG_P (target
) || reload_in_progress
)
3871 return emit_insn (insns
);
3873 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3874 if (!NONJUMP_INSN_P (insn
)
3875 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3876 return emit_insn (insns
);
3878 /* First emit all insns that do not store into words of the output and remove
3879 these from the list. */
3880 for (insn
= insns
; insn
; insn
= next
)
3883 struct no_conflict_data data
;
3885 next
= NEXT_INSN (insn
);
      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
3889 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3890 remove_note (insn
, note
);
3891 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3892 remove_note (insn
, note
);
3893 if ((note
= find_reg_note (insn
, REG_LIBCALL_ID
, NULL
)) != NULL
)
3894 remove_note (insn
, note
);
3896 data
.target
= target
;
3900 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3901 if (! data
.must_stay
)
3903 if (PREV_INSN (insn
))
3904 NEXT_INSN (PREV_INSN (insn
)) = next
;
3909 PREV_INSN (next
) = PREV_INSN (insn
);
3915 prev
= get_last_insn ();
3917 /* Now write the CLOBBER of the output, followed by the setting of each
3918 of the words, followed by the final copy. */
3919 if (target
!= op0
&& target
!= op1
)
3920 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3922 for (insn
= insns
; insn
; insn
= next
)
3924 next
= NEXT_INSN (insn
);
3927 if (op1
&& REG_P (op1
))
3928 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3931 if (op0
&& REG_P (op0
))
3932 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3936 if (optab_handler (mov_optab
, GET_MODE (target
))->insn_code
3937 != CODE_FOR_nothing
)
3939 last
= emit_move_insn (target
, target
);
3941 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3945 last
= get_last_insn ();
3947 /* Remove any existing REG_EQUAL note from "last", or else it will
3948 be mistaken for a note referring to the full contents of the
3949 alleged libcall value when found together with the REG_RETVAL
3950 note added below. An existing note can come from an insn
3951 expansion at "last". */
3952 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3956 first
= get_insns ();
3958 first
= NEXT_INSN (prev
);
3960 maybe_encapsulate_block (first
, last
, equiv
);
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */
3990 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3992 rtx final_dest
= target
;
3993 rtx prev
, next
, first
, last
, insn
;
3995 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3996 into a MEM later. Protect the libcall block from this change. */
3997 if (! REG_P (target
) || REG_USERVAR_P (target
))
3998 target
= gen_reg_rtx (GET_MODE (target
));
4000 /* If we're using non-call exceptions, a libcall corresponding to an
4001 operation that may trap may also trap. */
4002 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
4004 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4007 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
4009 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
4010 remove_note (insn
, note
);
4014 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4015 reg note to indicate that this call cannot throw or execute a nonlocal
4016 goto (unless there is already a REG_EH_REGION note, in which case
4018 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4021 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
4024 XEXP (note
, 0) = constm1_rtx
;
4026 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
4030 /* First emit all insns that set pseudos. Remove them from the list as
4031 we go. Avoid insns that set pseudos which were referenced in previous
4032 insns. These can be generated by move_by_pieces, for example,
4033 to update an address. Similarly, avoid insns that reference things
4034 set in previous insns. */
4036 for (insn
= insns
; insn
; insn
= next
)
4038 rtx set
= single_set (insn
);
      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
4043 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
4044 remove_note (insn
, note
);
4045 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
4046 remove_note (insn
, note
);
4047 if ((note
= find_reg_note (insn
, REG_LIBCALL_ID
, NULL
)) != NULL
)
4048 remove_note (insn
, note
);
4050 next
= NEXT_INSN (insn
);
4052 if (set
!= 0 && REG_P (SET_DEST (set
))
4053 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
4055 struct no_conflict_data data
;
4057 data
.target
= const0_rtx
;
4061 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
4062 if (! data
.must_stay
)
4064 if (PREV_INSN (insn
))
4065 NEXT_INSN (PREV_INSN (insn
)) = next
;
4070 PREV_INSN (next
) = PREV_INSN (insn
);
4076 /* Some ports use a loop to copy large arguments onto the stack.
4077 Don't move anything outside such a loop. */
4082 prev
= get_last_insn ();
4084 /* Write the remaining insns followed by the final copy. */
4086 for (insn
= insns
; insn
; insn
= next
)
4088 next
= NEXT_INSN (insn
);
4093 last
= emit_move_insn (target
, result
);
4094 if (optab_handler (mov_optab
, GET_MODE (target
))->insn_code
4095 != CODE_FOR_nothing
)
4096 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
4099 /* Remove any existing REG_EQUAL note from "last", or else it will
4100 be mistaken for a note referring to the full contents of the
4101 libcall value when found together with the REG_RETVAL note added
4102 below. An existing note can come from an insn expansion at
4104 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
4107 if (final_dest
!= target
)
4108 emit_move_insn (final_dest
, target
);
4111 first
= get_insns ();
4113 first
= NEXT_INSN (prev
);
4115 maybe_encapsulate_block (first
, last
, equiv
);
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */
4127 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
4128 enum can_compare_purpose purpose
)
4132 if (optab_handler (cmp_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
4134 if (purpose
== ccp_jump
)
4135 return bcc_gen_fctn
[(int) code
] != NULL
;
4136 else if (purpose
== ccp_store_flag
)
4137 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
4139 /* There's only one cmov entry point, and it's allowed to fail. */
4142 if (purpose
== ccp_jump
4143 && optab_handler (cbranch_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
4145 if (purpose
== ccp_cmov
4146 && optab_handler (cmov_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
4148 if (purpose
== ccp_store_flag
4149 && optab_handler (cstore_optab
, mode
)->insn_code
!= CODE_FOR_nothing
)
4151 mode
= GET_MODE_WIDER_MODE (mode
);
4153 while (mode
!= VOIDmode
);
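
/* Illustrative note (not part of the original source): a typical caller
   guards its code generation on this predicate, matching the PURPOSE
   argument to the intended use, e.g.

     if (can_compare_p (EQ, SImode, ccp_jump))
       emit_cmp_and_jump_insns (a, b, EQ, NULL_RTX, SImode, 0, label);

   where A, B and LABEL are hypothetical rtx values; otherwise the caller
   falls back to a wider mode or a library call.  */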
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  rtx libfunc;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure if we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstrn_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *px = result;
	  *py = const0_rtx;
	  *pmode = result_mode;
	  return;
	}

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  libfunc = optab_libfunc (cmp_optab, mode);
  if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      rtx ulibfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      ulibfunc = optab_libfunc (ucmp_optab, mode);

      if (unsignedp && ulibfunc)
	libfunc = ulibfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					targetm.libgcc_cmp_return_mode (),
					2, x, mode, y, mode);

      /* There are two kinds of comparison routines. Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1. Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison. For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case. For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1. This gives us a way to
	 represent LTU.  */
      *px = result;
      *py = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED)
	{
	  if (*punsignedp)
	    *px = plus_constant (result, 1);
	  else
	    *py = const0_rtx;
	}
      return;
    }

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
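
/* Illustrative note (not part of the original source): for the integer
   libcall path above, a biased routine such as libgcc's __cmpdi2 returns
   0, 1 or 2 for "less", "equal" and "greater", so a test like (x < y)
   becomes (__cmpdi2 (x, y) < 1), while an unbiased routine returning
   -1/0/1 is compared against 0 instead (or biased by adding 1 first for
   unsigned comparisons), exactly as the comment above describes.  */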
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    {
      if (reload_completed)
	return NULL_RTX;
      else
	x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
	{
	  icode = optab_handler (cbranch_optab, wider_mode)->insn_code;

	  if (icode != CODE_FOR_nothing
	      && insn_data[icode].operand[0].predicate (test, wider_mode))
	    {
	      x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	      y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	      emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	      return;
	    }
	}

      /* Handle some compares against zero.  */
      icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      if (!CLASS_HAS_WIDER_MODES_P (class))
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, callers are required to pass
	 operands in an order conforming to canonical RTL.  We relax this
	 for commutative comparisons so callers using EQ don't need to do
	 swapping by hand.  */
      gcc_assert (label || (comparison == swap_condition (comparison)));

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
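
/* Illustrative sketch (not part of the original source): an expander that
   wants "if (x > y) goto lab" on SImode operands would typically write

     rtx lab = gen_label_rtx ();
     emit_cmp_and_jump_insns (x, y, GT, NULL_RTX, SImode, 0, lab);
     ... code executed when the branch is not taken ...
     emit_label (lab);

   passing a nonzero UNSIGNEDP instead of 0 for an unsigned comparison.  */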
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
4483 /* Emit a library call comparison between floating point X and Y.
4484 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4487 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
4488 enum machine_mode
*pmode
, int *punsignedp
)
4490 enum rtx_code comparison
= *pcomparison
;
4491 enum rtx_code swapped
= swap_condition (comparison
);
4492 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4495 enum machine_mode orig_mode
= GET_MODE (x
);
4496 enum machine_mode mode
, cmp_mode
;
4497 rtx value
, target
, insns
, equiv
;
4499 bool reversed_p
= false;
4500 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4502 for (mode
= orig_mode
;
4504 mode
= GET_MODE_WIDER_MODE (mode
))
4506 if ((libfunc
= optab_libfunc (code_to_optab
[comparison
], mode
)))
4509 if ((libfunc
= optab_libfunc (code_to_optab
[swapped
] , mode
)))
4512 tmp
= x
; x
= y
; y
= tmp
;
4513 comparison
= swapped
;
4517 if ((libfunc
= optab_libfunc (code_to_optab
[reversed
], mode
))
4518 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
4520 comparison
= reversed
;
4526 gcc_assert (mode
!= VOIDmode
);
4528 if (mode
!= orig_mode
)
4530 x
= convert_to_mode (mode
, x
, 0);
4531 y
= convert_to_mode (mode
, y
, 0);
4534 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4535 the RTL. The allows the RTL optimizers to delete the libcall if the
4536 condition can be determined at compile-time. */
4537 if (comparison
== UNORDERED
)
4539 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4540 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4541 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4542 temp
, const_true_rtx
, equiv
);
4546 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4547 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4549 rtx true_rtx
, false_rtx
;
4554 true_rtx
= const0_rtx
;
4555 false_rtx
= const_true_rtx
;
4559 true_rtx
= const_true_rtx
;
4560 false_rtx
= const0_rtx
;
4564 true_rtx
= const1_rtx
;
4565 false_rtx
= const0_rtx
;
4569 true_rtx
= const0_rtx
;
4570 false_rtx
= constm1_rtx
;
4574 true_rtx
= constm1_rtx
;
4575 false_rtx
= const0_rtx
;
4579 true_rtx
= const0_rtx
;
4580 false_rtx
= const1_rtx
;
4586 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4587 equiv
, true_rtx
, false_rtx
);
4592 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4593 cmp_mode
, 2, x
, mode
, y
, mode
);
4594 insns
= get_insns ();
4597 target
= gen_reg_rtx (cmp_mode
);
4598 emit_libcall_block (insns
, target
, value
, equiv
);
4600 if (comparison
== UNORDERED
4601 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4602 comparison
= reversed_p
? EQ
: NE
;
4607 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
4624 #ifdef HAVE_conditional_move
4626 /* Emit a conditional move instruction if the machine supports one for that
4627 condition and machine mode.
4629 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4630 the mode to use should they be constants. If it is VOIDmode, they cannot
4633 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4634 should be stored there. MODE is the mode to use should they be constants.
4635 If it is VOIDmode, they cannot both be constants.
4637 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4638 is not supported. */
4641 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4642 enum machine_mode cmode
, rtx op2
, rtx op3
,
4643 enum machine_mode mode
, int unsignedp
)
4645 rtx tem
, subtarget
, comparison
, insn
;
4646 enum insn_code icode
;
4647 enum rtx_code reversed
;
4649 /* If one operand is constant, make it the second one. Only do this
4650 if the other operand is not constant as well. */
4652 if (swap_commutative_operands_p (op0
, op1
))
4657 code
= swap_condition (code
);
4660 /* get_condition will prefer to generate LT and GT even if the old
4661 comparison was against zero, so undo that canonicalization here since
4662 comparisons against zero are cheaper. */
4663 if (code
== LT
&& op1
== const1_rtx
)
4664 code
= LE
, op1
= const0_rtx
;
4665 else if (code
== GT
&& op1
== constm1_rtx
)
4666 code
= GE
, op1
= const0_rtx
;
4668 if (cmode
== VOIDmode
)
4669 cmode
= GET_MODE (op0
);
4671 if (swap_commutative_operands_p (op2
, op3
)
4672 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4681 if (mode
== VOIDmode
)
4682 mode
= GET_MODE (op2
);
4684 icode
= movcc_gen_code
[mode
];
4686 if (icode
== CODE_FOR_nothing
)
4690 target
= gen_reg_rtx (mode
);
4694 /* If the insn doesn't accept these operands, put them in pseudos. */
4696 if (!insn_data
[icode
].operand
[0].predicate
4697 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4698 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4700 if (!insn_data
[icode
].operand
[2].predicate
4701 (op2
, insn_data
[icode
].operand
[2].mode
))
4702 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4704 if (!insn_data
[icode
].operand
[3].predicate
4705 (op3
, insn_data
[icode
].operand
[3].mode
))
4706 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4708 /* Everything should now be in the suitable form, so emit the compare insn
4709 and then the conditional move. */
4712 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4714 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4715 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4716 return NULL and let the caller figure out how best to deal with this
4718 if (GET_CODE (comparison
) != code
)
4721 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4723 /* If that failed, then give up. */
4729 if (subtarget
!= target
)
4730 convert_move (target
, subtarget
, 0);
4735 /* Return nonzero if a conditional move of mode MODE is supported.
4737 This function is for combine so it can tell whether an insn that looks
4738 like a conditional move is actually supported by the hardware. If we
4739 guess wrong we lose a bit on optimization, but that's it. */
4740 /* ??? sparc64 supports conditionally moving integers values based on fp
4741 comparisons, and vice versa. How do we handle them? */
4744 can_conditionally_move_p (enum machine_mode mode
)
4746 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4752 #endif /* HAVE_conditional_move */
4754 /* Emit a conditional addition instruction if the machine supports one for that
4755 condition and machine mode.
4757 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4758 the mode to use should they be constants. If it is VOIDmode, they cannot
4761 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4762 should be stored there. MODE is the mode to use should they be constants.
4763 If it is VOIDmode, they cannot both be constants.
4765 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4766 is not supported. */
4769 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4770 enum machine_mode cmode
, rtx op2
, rtx op3
,
4771 enum machine_mode mode
, int unsignedp
)
4773 rtx tem
, subtarget
, comparison
, insn
;
4774 enum insn_code icode
;
4775 enum rtx_code reversed
;
4777 /* If one operand is constant, make it the second one. Only do this
4778 if the other operand is not constant as well. */
4780 if (swap_commutative_operands_p (op0
, op1
))
4785 code
= swap_condition (code
);
4788 /* get_condition will prefer to generate LT and GT even if the old
4789 comparison was against zero, so undo that canonicalization here since
4790 comparisons against zero are cheaper. */
4791 if (code
== LT
&& op1
== const1_rtx
)
4792 code
= LE
, op1
= const0_rtx
;
4793 else if (code
== GT
&& op1
== constm1_rtx
)
4794 code
= GE
, op1
= const0_rtx
;
4796 if (cmode
== VOIDmode
)
4797 cmode
= GET_MODE (op0
);
4799 if (swap_commutative_operands_p (op2
, op3
)
4800 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4809 if (mode
== VOIDmode
)
4810 mode
= GET_MODE (op2
);
4812 icode
= optab_handler (addcc_optab
, mode
)->insn_code
;
4814 if (icode
== CODE_FOR_nothing
)
4818 target
= gen_reg_rtx (mode
);
4820 /* If the insn doesn't accept these operands, put them in pseudos. */
4822 if (!insn_data
[icode
].operand
[0].predicate
4823 (target
, insn_data
[icode
].operand
[0].mode
))
4824 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4828 if (!insn_data
[icode
].operand
[2].predicate
4829 (op2
, insn_data
[icode
].operand
[2].mode
))
4830 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4832 if (!insn_data
[icode
].operand
[3].predicate
4833 (op3
, insn_data
[icode
].operand
[3].mode
))
4834 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4836 /* Everything should now be in the suitable form, so emit the compare insn
4837 and then the conditional move. */
4840 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4842 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4843 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4844 return NULL and let the caller figure out how best to deal with this
4846 if (GET_CODE (comparison
) != code
)
4849 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4851 /* If that failed, then give up. */
4857 if (subtarget
!= target
)
4858 convert_move (target
, subtarget
, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
	   (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
	(x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
	   (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
	(x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
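
/* Illustrative sketch (not part of the original source): reload and other
   callers typically pair these helpers as

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));

   so the operand predicates are checked once and the generated body is
   emitted directly.  */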
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
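
/* Illustrative note (not part of the original source): the returned
   sequence is normally handed straight to emit_insn, e.g.
   emit_insn (gen_move_insn (dest, src)); where DEST and SRC are
   hypothetical rtx operands of the same mode.  */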
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
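
/* Illustrative note (not part of the original source): zero-extending a
   QImode value into an SImode register could be emitted as

     if (can_extend_p (SImode, QImode, 1) != CODE_FOR_nothing)
       emit_insn (gen_extend_insn (si_reg, qi_reg, SImode, QImode, 1));

   where SI_REG and QI_REG are hypothetical registers of those modes.  */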
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
}
5081 /* Generate code to convert FROM to floating point
5082 and store in TO. FROM must be fixed point and not VOIDmode.
5083 UNSIGNEDP nonzero means regard FROM as unsigned.
5084 Normally this is done by correcting the final value
5085 if it is negative. */
5088 expand_float (rtx to
, rtx from
, int unsignedp
)
5090 enum insn_code icode
;
5092 enum machine_mode fmode
, imode
;
5093 bool can_do_signed
= false;
5095 /* Crash now, because we won't be able to decide which mode to use. */
5096 gcc_assert (GET_MODE (from
) != VOIDmode
);
5098 /* Look for an insn to do the conversion. Do it in the specified
5099 modes if possible; otherwise convert either input, output or both to
5100 wider mode. If the integer mode is wider than the mode of FROM,
5101 we can do the conversion signed even if the input is unsigned. */
5103 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5104 fmode
= GET_MODE_WIDER_MODE (fmode
))
5105 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
5106 imode
= GET_MODE_WIDER_MODE (imode
))
5108 int doing_unsigned
= unsignedp
;
5110 if (fmode
!= GET_MODE (to
)
5111 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
5114 icode
= can_float_p (fmode
, imode
, unsignedp
);
5115 if (icode
== CODE_FOR_nothing
&& unsignedp
)
5117 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
5118 if (scode
!= CODE_FOR_nothing
)
5119 can_do_signed
= true;
5120 if (imode
!= GET_MODE (from
))
5121 icode
= scode
, doing_unsigned
= 0;
5124 if (icode
!= CODE_FOR_nothing
)
5126 if (imode
!= GET_MODE (from
))
5127 from
= convert_to_mode (imode
, from
, unsignedp
);
5129 if (fmode
!= GET_MODE (to
))
5130 target
= gen_reg_rtx (fmode
);
5132 emit_unop_insn (icode
, target
, from
,
5133 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
5136 convert_move (to
, target
, 0);
5141 /* Unsigned integer, and no way to convert directly. Convert as signed,
5142 then unconditionally adjust the result. For decimal float values we
5143 do this only if we have already determined that a signed conversion
5144 provides sufficient accuracy. */
5145 if (unsignedp
&& (can_do_signed
|| !DECIMAL_FLOAT_MODE_P (GET_MODE (to
))))
5147 rtx label
= gen_label_rtx ();
5149 REAL_VALUE_TYPE offset
;
5151 /* Look for a usable floating mode FMODE wider than the source and at
5152 least as wide as the target. Using FMODE will avoid rounding woes
5153 with unsigned values greater than the signed maximum value. */
5155 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
5156 fmode
= GET_MODE_WIDER_MODE (fmode
))
5157 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
5158 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
5161 if (fmode
== VOIDmode
)
5163 /* There is no such mode. Pretend the target is wide enough. */
5164 fmode
= GET_MODE (to
);
5166 /* Avoid double-rounding when TO is narrower than FROM. */
5167 if ((significand_size (fmode
) + 1)
5168 < GET_MODE_BITSIZE (GET_MODE (from
)))
5171 rtx neglabel
= gen_label_rtx ();
5173 /* Don't use TARGET if it isn't a register, is a hard register,
5174 or is the wrong mode. */
5176 || REGNO (target
) < FIRST_PSEUDO_REGISTER
5177 || GET_MODE (target
) != fmode
)
5178 target
= gen_reg_rtx (fmode
);
5180 imode
= GET_MODE (from
);
5181 do_pending_stack_adjust ();
5183 /* Test whether the sign bit is set. */
5184 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
5187 /* The sign bit is not set. Convert as signed. */
5188 expand_float (target
, from
, 0);
5189 emit_jump_insn (gen_jump (label
));
5192 /* The sign bit is set.
5193 Convert to a usable (positive signed) value by shifting right
5194 one bit, while remembering if a nonzero bit was shifted
5195 out; i.e., compute (from & 1) | (from >> 1). */
5197 emit_label (neglabel
);
5198 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
5199 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
5200 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
5202 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5204 expand_float (target
, temp
, 0);
5206 /* Multiply by 2 to undo the shift above. */
5207 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5208 target
, 0, OPTAB_LIB_WIDEN
);
5210 emit_move_insn (target
, temp
);
5212 do_pending_stack_adjust ();
5218 /* If we are about to do some arithmetic to correct for an
5219 unsigned operand, do it in a pseudo-register. */
5221 if (GET_MODE (to
) != fmode
5222 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5223 target
= gen_reg_rtx (fmode
);
5225 /* Convert as signed integer to floating. */
5226 expand_float (target
, from
, 0);
5228 /* If FROM is negative (and therefore TO is negative),
5229 correct its value by 2**bitwidth. */
5231 do_pending_stack_adjust ();
5232 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
5236 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)), fmode
);
5237 temp
= expand_binop (fmode
, add_optab
, target
,
5238 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
5239 target
, 0, OPTAB_LIB_WIDEN
);
5241 emit_move_insn (target
, temp
);
5243 do_pending_stack_adjust ();
5248 /* No hardware instruction available; call a library routine. */
5253 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5255 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
5256 from
= convert_to_mode (SImode
, from
, unsignedp
);
5258 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5259 gcc_assert (libfunc
);
5263 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5264 GET_MODE (to
), 1, from
,
5266 insns
= get_insns ();
5269 emit_libcall_block (insns
, target
, value
,
5270 gen_rtx_FLOAT (GET_MODE (to
), from
));
5275 /* Copy result to requested destination
5276 if we have been computing in a temp location. */
5280 if (GET_MODE (target
) == GET_MODE (to
))
5281 emit_move_insn (to
, target
);
5283 convert_move (to
, target
, 0);
5287 /* Generate code to convert FROM to fixed point and store in TO. FROM
5288 must be floating point. */
5291 expand_fix (rtx to
, rtx from
, int unsignedp
)
5293 enum insn_code icode
;
5295 enum machine_mode fmode
, imode
;
5298 /* We first try to find a pair of modes, one real and one integer, at
5299 least as wide as FROM and TO, respectively, in which we can open-code
5300 this conversion. If the integer mode is wider than the mode of TO,
5301 we can do the conversion either signed or unsigned. */
5303 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5304 fmode
= GET_MODE_WIDER_MODE (fmode
))
5305 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5306 imode
= GET_MODE_WIDER_MODE (imode
))
5308 int doing_unsigned
= unsignedp
;
5310 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5311 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5312 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5314 if (icode
!= CODE_FOR_nothing
)
5316 if (fmode
!= GET_MODE (from
))
5317 from
= convert_to_mode (fmode
, from
, 0);
5321 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5322 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5326 if (imode
!= GET_MODE (to
))
5327 target
= gen_reg_rtx (imode
);
5329 emit_unop_insn (icode
, target
, from
,
5330 doing_unsigned
? UNSIGNED_FIX
: FIX
);
5332 convert_move (to
, target
, unsignedp
);
5337 /* For an unsigned conversion, there is one more way to do it.
5338 If we have a signed conversion, we generate code that compares
5339 the real value to the largest representable positive number. If if
5340 is smaller, the conversion is done normally. Otherwise, subtract
5341 one plus the highest signed number, convert, and add it back.
5343 We only need to check all real modes, since we know we didn't find
5344 anything with a wider integer mode.
5346 This code used to extend FP value into mode wider than the destination.
5347 This is needed for decimal float modes which cannot accurately
5348 represent one plus the highest signed number of the same size, but
5349 not for binary modes. Consider, for instance conversion from SFmode
5352 The hot path through the code is dealing with inputs smaller than 2^63
5353 and doing just the conversion, so there is no bits to lose.
5355 In the other path we know the value is positive in the range 2^63..2^64-1
5356 inclusive. (as for other input overflow happens and result is undefined)
5357 So we know that the most important bit set in mantissa corresponds to
5358 2^63. The subtraction of 2^63 should not generate any rounding as it
5359 simply clears out that bit. The rest is trivial. */
5361 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5362 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5363 fmode
= GET_MODE_WIDER_MODE (fmode
))
5364 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5365 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5366 || GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))))
5369 REAL_VALUE_TYPE offset
;
5370 rtx limit
, lab1
, lab2
, insn
;
5372 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
5373 real_2expN (&offset
, bitsize
- 1, fmode
);
5374 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5375 lab1
= gen_label_rtx ();
5376 lab2
= gen_label_rtx ();
5378 if (fmode
!= GET_MODE (from
))
5379 from
= convert_to_mode (fmode
, from
, 0);
5381 /* See if we need to do the subtraction. */
5382 do_pending_stack_adjust ();
5383 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5386 /* If not, do the signed "fix" and branch around fixup code. */
5387 expand_fix (to
, from
, 0);
5388 emit_jump_insn (gen_jump (lab2
));
5391 /* Otherwise, subtract 2**(N-1), convert to signed number,
5392 then add 2**(N-1). Do the addition using XOR since this
5393 will often generate better code. */
5395 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5396 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5397 expand_fix (to
, target
, 0);
5398 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5400 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5402 to
, 1, OPTAB_LIB_WIDEN
);
5405 emit_move_insn (to
, target
);
5409 if (optab_handler (mov_optab
, GET_MODE (to
))->insn_code
5410 != CODE_FOR_nothing
)
5412 /* Make a place for a REG_NOTE and add it. */
5413 insn
= emit_move_insn (to
, to
);
5414 set_unique_reg_note (insn
,
5416 gen_rtx_fmt_e (UNSIGNED_FIX
,
5424 /* We can't do it with an insn, so use a library call. But first ensure
5425 that the mode of TO is at least as wide as SImode, since those are the
5426 only library calls we know about. */
5428 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5430 target
= gen_reg_rtx (SImode
);
5432 expand_fix (target
, from
, unsignedp
);
5440 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5441 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5442 gcc_assert (libfunc
);
5446 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5447 GET_MODE (to
), 1, from
,
5449 insns
= get_insns ();
5452 emit_libcall_block (insns
, target
, value
,
5453 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5454 GET_MODE (to
), from
));
5459 if (GET_MODE (to
) == GET_MODE (target
))
5460 emit_move_insn (to
, target
);
5462 convert_move (to
, target
, 0);
5466 /* Generate code to convert FROM or TO a fixed-point.
5467 If UINTP is true, either TO or FROM is an unsigned integer.
5468 If SATP is true, we need to saturate the result. */
5471 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5473 enum machine_mode to_mode
= GET_MODE (to
);
5474 enum machine_mode from_mode
= GET_MODE (from
);
5476 enum rtx_code this_code
;
5477 enum insn_code code
;
5481 if (to_mode
== from_mode
)
5483 emit_move_insn (to
, from
);
5489 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5490 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5494 tab
= satp
? satfract_optab
: fract_optab
;
5495 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5497 code
= tab
->handlers
[to_mode
][from_mode
].insn_code
;
5498 if (code
!= CODE_FOR_nothing
)
5500 emit_unop_insn (code
, to
, from
, this_code
);
5504 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5505 gcc_assert (libfunc
);
5508 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5509 1, from
, from_mode
);
5510 insns
= get_insns ();
5513 emit_libcall_block (insns
, to
, value
,
5514 gen_rtx_fmt_e (tab
->code
, to_mode
, from
));
5517 /* Generate code to convert FROM to fixed point and store in TO. FROM
5518 must be floating point, TO must be signed. Use the conversion optab
5519 TAB to do the conversion. */
5522 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5524 enum insn_code icode
;
5526 enum machine_mode fmode
, imode
;
5528 /* We first try to find a pair of modes, one real and one integer, at
5529 least as wide as FROM and TO, respectively, in which we can open-code
5530 this conversion. If the integer mode is wider than the mode of TO,
5531 we can do the conversion either signed or unsigned. */
5533 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5534 fmode
= GET_MODE_WIDER_MODE (fmode
))
5535 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5536 imode
= GET_MODE_WIDER_MODE (imode
))
5538 icode
= convert_optab_handler (tab
, imode
, fmode
)->insn_code
;
5539 if (icode
!= CODE_FOR_nothing
)
5541 if (fmode
!= GET_MODE (from
))
5542 from
= convert_to_mode (fmode
, from
, 0);
5544 if (imode
!= GET_MODE (to
))
5545 target
= gen_reg_rtx (imode
);
5547 emit_unop_insn (icode
, target
, from
, UNKNOWN
);
5549 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (optab_handler (code_to_optab[(int) code], mode)->insn_code
	      != CODE_FOR_nothing));
}
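
/* Illustrative note (not part of the original source): for example,
   have_insn_for (PLUS, SImode) is nonzero whenever the target provides an
   addsi3 pattern, since add_optab is registered for PLUS in
   code_to_optab.  */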
5567 /* Create a blank optab. */
5572 optab op
= xcalloc (sizeof (struct optab
), 1);
5574 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5575 optab_handler (op
, i
)->insn_code
= CODE_FOR_nothing
;
5580 static convert_optab
5581 new_convert_optab (void)
5584 convert_optab op
= xcalloc (sizeof (struct convert_optab
), 1);
5586 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5587 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
5588 convert_optab_handler (op
, i
, j
)->insn_code
= CODE_FOR_nothing
;
5593 /* Same, but fill in its code as CODE, and write it into the
5594 code_to_optab table. */
5596 init_optab (enum rtx_code code
)
5598 optab op
= new_optab ();
5600 code_to_optab
[(int) code
] = op
;
5604 /* Same, but fill in its code as CODE, and do _not_ write it into
5605 the code_to_optab table. */
5607 init_optabv (enum rtx_code code
)
5609 optab op
= new_optab ();
5614 /* Conversion optabs never go in the code_to_optab table. */
5615 static inline convert_optab
5616 init_convert_optab (enum rtx_code code
)
5618 convert_optab op
= new_convert_optab ();
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;
  *p = '\0';

  set_optab_libfunc (optable, mode,
		     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
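
/* Illustrative note (not part of the original source): following the
   naming rule above, gen_libfunc (add_optab, "add", '3', SFmode) registers
   the libgcc routine "__addsf3", and the same call with DImode yields
   "__adddi3".  */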
/* Like gen_libfunc, but verify that integer operation is involved.  */

static void
gen_int_libfunc (optab optable, const char *opname, char suffix,
		 enum machine_mode mode)
{
  int maxsize = 2 * BITS_PER_WORD;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  if (GET_MODE_CLASS (mode) != MODE_INT
      || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
    return;
  gen_libfunc (optable, opname, suffix, mode);
}
/* Like gen_libfunc, but verify that FP and set decimal prefix if needed.  */

static void
gen_fp_libfunc (optab optable, const char *opname, char suffix,
		enum machine_mode mode)
{
  char *dec_opname;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_libfunc (optable, opname, suffix, mode);
  if (DECIMAL_FLOAT_MODE_P (mode))
    {
      dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
      /* For BID support, change the name to have either a bid_ or dpd_ prefix
	 depending on the low level floating format used.  */
      memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
      strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
      gen_libfunc (optable, dec_opname, suffix, mode);
    }
}
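
/* Illustrative note (not part of the original source): for a decimal mode
   such as DDmode the prefixed name is used instead, so with the BID format
   the registered routine becomes e.g. "__bid_adddd3", and with the DPD
   format "__dpd_adddd3".  */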
5700 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5703 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5704 enum machine_mode mode
)
5706 if (!ALL_FIXED_POINT_MODE_P (mode
))
5708 gen_libfunc (optable
, opname
, suffix
, mode
);
5711 /* Like gen_libfunc, but verify that signed fixed-point operation is
5715 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5716 enum machine_mode mode
)
5718 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5720 gen_libfunc (optable
, opname
, suffix
, mode
);
5723 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5727 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5728 enum machine_mode mode
)
5730 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5732 gen_libfunc (optable
, opname
, suffix
, mode
);
5735 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5738 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5739 enum machine_mode mode
)
5741 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5742 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5743 if (INTEGRAL_MODE_P (mode
))
5744 gen_int_libfunc (optable
, name
, suffix
, mode
);
5747 /* Like gen_libfunc, but verify that FP or INT operation is involved
5748 and add 'v' suffix for integer operation. */
5751 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5752 enum machine_mode mode
)
5754 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5755 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5756 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5758 int len
= strlen (name
);
5759 char *v_name
= alloca (len
+ 2);
5760 strcpy (v_name
, name
);
5762 v_name
[len
+ 1] = 0;
5763 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
5767 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5771 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5772 enum machine_mode mode
)
5774 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5775 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5776 if (INTEGRAL_MODE_P (mode
))
5777 gen_int_libfunc (optable
, name
, suffix
, mode
);
5778 if (ALL_FIXED_POINT_MODE_P (mode
))
5779 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5782 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5786 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5787 enum machine_mode mode
)
5789 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5790 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5791 if (INTEGRAL_MODE_P (mode
))
5792 gen_int_libfunc (optable
, name
, suffix
, mode
);
5793 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5794 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5797 /* Like gen_libfunc, but verify that INT or FIXED operation is
5801 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5802 enum machine_mode mode
)
5804 if (INTEGRAL_MODE_P (mode
))
5805 gen_int_libfunc (optable
, name
, suffix
, mode
);
5806 if (ALL_FIXED_POINT_MODE_P (mode
))
5807 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5810 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5814 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5815 enum machine_mode mode
)
5817 if (INTEGRAL_MODE_P (mode
))
5818 gen_int_libfunc (optable
, name
, suffix
, mode
);
5819 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5820 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5823 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5827 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5828 enum machine_mode mode
)
5830 if (INTEGRAL_MODE_P (mode
))
5831 gen_int_libfunc (optable
, name
, suffix
, mode
);
5832 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5833 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5836 /* Initialize the libfunc fields of an entire group of entries of an
5837 inter-mode-class conversion optab. The string formation rules are
5838 similar to the ones for init_libfuncs, above, but instead of having
5839 a mode name and an operand count these functions have two mode names
5840 and no operand count. */
5843 gen_interclass_conv_libfunc (convert_optab tab
,
5845 enum machine_mode tmode
,
5846 enum machine_mode fmode
)
5848 size_t opname_len
= strlen (opname
);
5849 size_t mname_len
= 0;
5851 const char *fname
, *tname
;
5853 char *libfunc_name
, *suffix
;
5854 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5857 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5858 depends on which underlying decimal floating point format is used. */
5859 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5861 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5863 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5864 nondec_name
[0] = '_';
5865 nondec_name
[1] = '_';
5866 memcpy (&nondec_name
[2], opname
, opname_len
);
5867 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5869 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5872 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5873 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5874 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5876 fname
= GET_MODE_NAME (fmode
);
5877 tname
= GET_MODE_NAME (tmode
);
5879 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5881 libfunc_name
= dec_name
;
5882 suffix
= dec_suffix
;
5886 libfunc_name
= nondec_name
;
5887 suffix
= nondec_suffix
;
5891 for (q
= fname
; *q
; p
++, q
++)
5893 for (q
= tname
; *q
; p
++, q
++)
5898 set_conv_libfunc (tab
, tmode
, fmode
,
5899 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5902 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5903 int->fp conversion. */
5906 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5908 enum machine_mode tmode
,
5909 enum machine_mode fmode
)
5911 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5913 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5915 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5918 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5922 gen_ufloat_conv_libfunc (convert_optab tab
,
5923 const char *opname ATTRIBUTE_UNUSED
,
5924 enum machine_mode tmode
,
5925 enum machine_mode fmode
)
5927 if (DECIMAL_FLOAT_MODE_P (tmode
))
5928 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5930 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
5933 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5934 fp->int conversion. */
5937 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5939 enum machine_mode tmode
,
5940 enum machine_mode fmode
)
5942 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5944 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5946 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5949 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5950 fp->int conversion with no decimal floating point involved. */
5953 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5955 enum machine_mode tmode
,
5956 enum machine_mode fmode
)
5958 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5960 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5962 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5965 /* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
5966 The string formation rules are
5967 similar to the ones for init_libfunc, above. */
5970 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5971 enum machine_mode tmode
, enum machine_mode fmode
)
5973 size_t opname_len
= strlen (opname
);
5974 size_t mname_len
= 0;
5976 const char *fname
, *tname
;
5978 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5979 char *libfunc_name
, *suffix
;
5982 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5983 depends on which underlying decimal floating point format is used. */
5984 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5986 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5988 nondec_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5989 nondec_name
[0] = '_';
5990 nondec_name
[1] = '_';
5991 memcpy (&nondec_name
[2], opname
, opname_len
);
5992 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5994 dec_name
= alloca (2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5997 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5998 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5999 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
6001 fname
= GET_MODE_NAME (fmode
);
6002 tname
= GET_MODE_NAME (tmode
);
6004 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
6006 libfunc_name
= dec_name
;
6007 suffix
= dec_suffix
;
6011 libfunc_name
= nondec_name
;
6012 suffix
= nondec_suffix
;
6016 for (q
= fname
; *q
; p
++, q
++)
6018 for (q
= tname
; *q
; p
++, q
++)
6024 set_conv_libfunc (tab
, tmode
, fmode
,
6025 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
6028 /* Pick proper libcall for trunc_optab. We need to chose if we do
6029 truncation or extension and interclass or intraclass. */
6032 gen_trunc_conv_libfunc (convert_optab tab
,
6034 enum machine_mode tmode
,
6035 enum machine_mode fmode
)
6037 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6039 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6044 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6045 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6046 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6048 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
6051 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6052 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6053 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6054 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6057 /* Pick proper libcall for extend_optab. We need to chose if we do
6058 truncation or extension and interclass or intraclass. */
6061 gen_extend_conv_libfunc (convert_optab tab
,
6062 const char *opname ATTRIBUTE_UNUSED
,
6063 enum machine_mode tmode
,
6064 enum machine_mode fmode
)
6066 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
6068 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
6073 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
6074 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
6075 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6077 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
6080 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
6081 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
6082 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
6083 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6086 /* Pick proper libcall for fract_optab. We need to chose if we do
6087 interclass or intraclass. */
6090 gen_fract_conv_libfunc (convert_optab tab
,
6092 enum machine_mode tmode
,
6093 enum machine_mode fmode
)
6097 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
6100 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6101 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6103 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6106 /* Pick proper libcall for fractuns_optab. */
6109 gen_fractuns_conv_libfunc (convert_optab tab
,
6111 enum machine_mode tmode
,
6112 enum machine_mode fmode
)
6116 /* One mode must be a fixed-point mode, and the other must be an integer
6118 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
6119 || (ALL_FIXED_POINT_MODE_P (fmode
)
6120 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
6123 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6126 /* Pick proper libcall for satfract_optab. We need to chose if we do
6127 interclass or intraclass. */
6130 gen_satfract_conv_libfunc (convert_optab tab
,
6132 enum machine_mode tmode
,
6133 enum machine_mode fmode
)
6137 /* TMODE must be a fixed-point mode. */
6138 if (!ALL_FIXED_POINT_MODE_P (tmode
))
6141 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
6142 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6144 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
6147 /* Pick proper libcall for satfractuns_optab. */
6150 gen_satfractuns_conv_libfunc (convert_optab tab
,
6152 enum machine_mode tmode
,
6153 enum machine_mode fmode
)
6157 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6158 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
6161 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo ()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab_table[0] - optable);
  e.mode1 = mode;
  e.mode2 = VOIDmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (optab_table[0] - optable);
  (*slot)->mode1 = mode;
  (*slot)->mode2 = VOIDmode;
  (*slot)->libfunc = val;
}
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (convert_optab_table[0] - optable);
  e.mode1 = tmode;
  e.mode2 = fmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc (sizeof (struct libfunc_entry));
  (*slot)->optab = (size_t) (convert_optab_table[0] - optable);
  (*slot)->mode1 = tmode;
  (*slot)->mode2 = fmode;
  (*slot)->libfunc = val;
}
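
/* Illustrative sketch, not part of the original file: a target's
   TARGET_INIT_LIBFUNCS hook usually overrides individual default entries
   with calls like the ones below.  The replacement routine names used
   here are hypothetical.  */

static void ATTRIBUTE_UNUSED
example_target_init_libfuncs (void)
{
  /* Use an OS-provided division routine instead of libgcc's default.  */
  set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");

  /* Use a custom SImode -> DFmode conversion routine.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__example_floatsidf");

  /* Passing 0 removes the libcall for a mode entirely.  */
  set_optab_libfunc (ffs_optab, DImode, 0);
}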
/* Call this to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;
  enum machine_mode int_mode;

  libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  ssadd_optab = init_optab (SS_PLUS);
  usadd_optab = init_optab (US_PLUS);
  sssub_optab = init_optab (SS_MINUS);
  ussub_optab = init_optab (US_MINUS);
  smul_optab = init_optab (MULT);
  ssmul_optab = init_optab (SS_MULT);
  usmul_optab = init_optab (US_MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  usmul_widen_optab = init_optab (UNKNOWN);
  smadd_widen_optab = init_optab (UNKNOWN);
  umadd_widen_optab = init_optab (UNKNOWN);
  ssmadd_widen_optab = init_optab (UNKNOWN);
  usmadd_widen_optab = init_optab (UNKNOWN);
  smsub_widen_optab = init_optab (UNKNOWN);
  umsub_widen_optab = init_optab (UNKNOWN);
  ssmsub_widen_optab = init_optab (UNKNOWN);
  usmsub_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  ssdiv_optab = init_optab (SS_DIV);
  usdiv_optab = init_optab (US_DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  remainder_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ssashl_optab = init_optab (SS_ASHIFT);
  usashl_optab = init_optab (US_ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  storent_optab = init_optab (UNKNOWN);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  ssneg_optab = init_optab (SS_NEG);
  usneg_optab = init_optab (US_NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  bswap_optab = init_optab (BSWAP);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  ldexp_optab = init_optab (UNKNOWN);
  scalb_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  copysign_optab = init_optab (UNKNOWN);
  signbit_optab = init_optab (UNKNOWN);

  isinf_optab = init_optab (UNKNOWN);

  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  reduc_smax_optab = init_optab (UNKNOWN);
  reduc_umax_optab = init_optab (UNKNOWN);
  reduc_smin_optab = init_optab (UNKNOWN);
  reduc_umin_optab = init_optab (UNKNOWN);
  reduc_splus_optab = init_optab (UNKNOWN);
  reduc_uplus_optab = init_optab (UNKNOWN);

  ssum_widen_optab = init_optab (UNKNOWN);
  usum_widen_optab = init_optab (UNKNOWN);
  sdot_prod_optab = init_optab (UNKNOWN);
  udot_prod_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_extract_even_optab = init_optab (UNKNOWN);
  vec_extract_odd_optab = init_optab (UNKNOWN);
  vec_interleave_high_optab = init_optab (UNKNOWN);
  vec_interleave_low_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_shl_optab = init_optab (UNKNOWN);
  vec_shr_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);
  vec_widen_umult_hi_optab = init_optab (UNKNOWN);
  vec_widen_umult_lo_optab = init_optab (UNKNOWN);
  vec_widen_smult_hi_optab = init_optab (UNKNOWN);
  vec_widen_smult_lo_optab = init_optab (UNKNOWN);
  vec_unpacks_hi_optab = init_optab (UNKNOWN);
  vec_unpacks_lo_optab = init_optab (UNKNOWN);
  vec_unpacku_hi_optab = init_optab (UNKNOWN);
  vec_unpacku_lo_optab = init_optab (UNKNOWN);
  vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
  vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
  vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
  vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
  vec_pack_trunc_optab = init_optab (UNKNOWN);
  vec_pack_usat_optab = init_optab (UNKNOWN);
  vec_pack_ssat_optab = init_optab (UNKNOWN);
  vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
  vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);

  powi_optab = init_optab (UNKNOWN);
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
  lrint_optab = init_convert_optab (UNKNOWN);
  lround_optab = init_convert_optab (UNKNOWN);
  lfloor_optab = init_convert_optab (UNKNOWN);
  lceil_optab = init_convert_optab (UNKNOWN);

  fract_optab = init_convert_optab (FRACT_CONVERT);
  fractuns_optab = init_convert_optab (UNSIGNED_FRACT_CONVERT);
  satfract_optab = init_convert_optab (SAT_FRACT);
  satfractuns_optab = init_convert_optab (UNSIGNED_SAT_FRACT);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
    }
  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssadd_optab->libcall_basename = "ssadd";
  ssadd_optab->libcall_suffix = '3';
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
  usadd_optab->libcall_basename = "usadd";
  usadd_optab->libcall_suffix = '3';
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  sssub_optab->libcall_basename = "sssub";
  sssub_optab->libcall_suffix = '3';
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
  ussub_optab->libcall_basename = "ussub";
  ussub_optab->libcall_suffix = '3';
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssmul_optab->libcall_basename = "ssmul";
  ssmul_optab->libcall_suffix = '3';
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
  usmul_optab->libcall_basename = "usmul";
  usmul_optab->libcall_suffix = '3';
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  ssdiv_optab->libcall_basename = "ssdiv";
  ssdiv_optab->libcall_suffix = '3';
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  usdiv_optab->libcall_basename = "usdiv";
  usdiv_optab->libcall_suffix = '3';
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
  ssashl_optab->libcall_basename = "ssashl";
  ssashl_optab->libcall_suffix = '3';
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
  usashl_optab->libcall_basename = "usashl";
  usashl_optab->libcall_suffix = '3';
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ssneg_optab->libcall_basename = "ssneg";
  ssneg_optab->libcall_suffix = '2';
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
  usneg_optab->libcall_basename = "usneg";
  usneg_optab->libcall_suffix = '2';
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;
  /* Comparison libcalls for integers MUST come in pairs,
     signed and unsigned.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;

  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;
  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* trunc_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;

  /* Conversions for fixed-point modes and other modes.  */
  fract_optab->libcall_basename = "fract";
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
  satfract_optab->libcall_basename = "satfract";
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
  fractuns_optab->libcall_basename = "fractuns";
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
  satfractuns_optab->libcall_basename = "satfractuns";
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    {
      int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
      set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
			 "ffs");
    }

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
  set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif

  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
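
/* Illustrative sketch, not part of the original file: the
   libcall_basename/libcall_suffix fields set up in init_optabs combine
   into libgcc-style symbols such as "__addsi3" or "__negdf2" ("__" +
   basename + mode name(s) + operand-count suffix).  The helper below only
   demonstrates that convention; the real construction is done by the
   gen_*_libfunc generators, which also lower-case the mode name and
   prepend the decimal-float prefix where needed.  */

static rtx ATTRIBUTE_UNUSED
example_default_libfunc_name (optab op, enum machine_mode mode)
{
  char buf[64];

  if (!op->libcall_basename)
    return NULL_RTX;

  /* E.g. add_optab ("add", '3') in SImode gives "__addSI3" here; the
     real generators emit the lower-cased "__addsi3".  */
  sprintf (buf, "__%s%s%c", op->libcall_basename,
	   GET_MODE_NAME (mode), op->libcall_suffix);
  return init_one_libfunc (buf);
}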
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	optab o;
	rtx l;

	o = optab_table[i];
	l = optab_libfunc (optab_table[i], j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (o->code),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  convert_optab o;
	  rtx l;

	  o = convert_optab_table[i];
	  l = convert_optab_libfunc (o, j, k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (o->code),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
	       rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (cmp_optab, mode)->insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
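
/* Illustrative sketch, not part of the original file: a caller that wants
   to trap when a value is zero (e.g. a divisor check) could use
   gen_cond_trap as below and fall back to an explicit compare-and-branch
   when the target has no conditional trap pattern.  The wrapper name is
   hypothetical.  */

static bool ATTRIBUTE_UNUSED
example_emit_trap_if_zero (rtx op)
{
  rtx seq = gen_cond_trap (EQ, op, const0_rtx, const0_rtx);

  if (seq == NULL_RTX)
    return false;	/* Caller must emit a branch-based check instead.  */
  emit_insn (seq);
  return true;
}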
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;
    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, auto vectorizer
     ensures that condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_NORMAL);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_NORMAL);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in mode VMODE.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
				   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them in reg, if required.  */
  rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit instruction!  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
			      comparison, cc_op0, cc_op1));

  return target;
}
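
/* Illustrative sketch, not part of the original file: a caller in the
   expander typically guards the direct expansion with the availability
   check above, falling back to open-coded element-wise selection
   otherwise.  The wrapper name is hypothetical.  */

static rtx ATTRIBUTE_UNUSED
example_try_expand_vcond (tree exp, rtx target)
{
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));

  if (expand_vec_cond_expr_p (exp, mode))
    return expand_vec_cond_expr (exp, target);
  return NULL_RTX;
}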
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
			       rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;

  emit_insn (insn);
  return target;
}
/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
	break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
	 against an old value.  */
      if (MEM_P (old_val))
	old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
	return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode cmode = insn_data[icode].operand[0].mode;
	  rtx insn;

	  subtarget = target;
	  if (!insn_data[icode].operand[0].predicate (target, cmode))
	    subtarget = gen_reg_rtx (cmode);

	  insn = GEN_FCN (icode) (subtarget);
	  if (insn)
	    {
	      emit_insn (insn);
	      if (GET_MODE (target) != GET_MODE (subtarget))
		{
		  convert_move (target, subtarget, 1);
		  subtarget = target;
		}
	      return subtarget;
	    }
	}
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget != NULL_RTX)
	{
	  gcc_assert (subtarget == cmp_reg);
	  break;
	}

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget == NULL_RTX)
	return false;
      if (subtarget != cmp_reg)
	emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing)
	{
	  icode = sync_add_optab[mode];
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return const0_rtx;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	{
	  old_code = sync_old_add_optab[mode];
	  new_code = sync_new_add_optab[mode];
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target supports the proper new/old operation, great.  But if we
     only support the opposite old/new operation, check to see if we can
     compensate.  In the case in which the old value is supported, then we
     can always perform the operation again with normal arithmetic.  In the
     case in which the new value is supported, then we can only handle this
     in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  icode = old_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  icode = new_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);

	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (!after)
		{
		  if (code == PLUS)
		    code = MINUS;
		  else if (code == MINUS)
		    code = PLUS;
		}

	      if (code == NOT)
		{
		  target = expand_simple_unop (mode, NOT, target,
					       NULL_RTX, true);
		  code = AND;
		}
	      target = expand_simple_binop (mode, code, target, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
	emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value stored
   in MEM is target defined.  TARGET is an optional place to stick the
   return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return target;
	}
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
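
/* Illustrative sketch, not part of the original file: the __sync_*
   builtin expanders use the entry points above roughly as below.  The
   wrapper and its arguments are hypothetical.  */

static rtx ATTRIBUTE_UNUSED
example_expand_sync_add_builtin (rtx mem, rtx val, bool want_result,
				 bool after, rtx target)
{
  if (!want_result)
    /* Result unused: only the side effect on memory matters.  */
    return expand_sync_operation (mem, val, PLUS);

  /* __sync_fetch_and_add wants the old value (AFTER == false);
     __sync_add_and_fetch wants the new value (AFTER == true).  */
  return expand_sync_fetch_operation (mem, val, PLUS, after, target);
}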
#include "gt-optabs.h"