/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
struct target_optabs default_target_optabs;
struct target_libfuncs default_target_libfuncs;

struct target_optabs *this_target_optabs = &default_target_optabs;
struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;

#define libfunc_hash \
  (this_target_libfuncs->x_libfunc_hash)
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   enum machine_mode *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Prefixes for the current version of decimal floating point (BID vs. DPD).  */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Used for libfunc_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}
/* Used for libfunc_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed,
   return NULL if no libfunc is available.  */

rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &convert_optab_table[0]);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }

  return (*slot)->libfunc;
}
/* Return the libfunc corresponding to the operation defined by OPTAB in
   MODE.  Trigger lazy initialization if needed, return NULL if no libfunc
   is available.  */

rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }

  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static enum machine_mode
widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
{
  enum machine_mode m0 = GET_MODE (op0);
  enum machine_mode m1 = GET_MODE (op1);
  enum machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->SI insn, then return SI->DI, if that exists.
   If PERMIT_NON_WIDENING is non-zero then this can be used with
   non-widening optabs also.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
                                      enum machine_mode from_mode,
                                      int permit_non_widening,
                                      enum machine_mode *found_mode)
{
  for (; (permit_non_widening || from_mode != to_mode)
         && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
         && from_mode != VOIDmode;
       from_mode = GET_MODE_WIDER_MODE (from_mode))
    {
      enum insn_code handler = widening_optab_handler (op, to_mode,
                                                       from_mode);

      if (handler != CODE_FOR_nothing)
        {
          if (found_mode)
            *found_mode = from_mode;
          return handler;
        }
    }

  return CODE_FOR_nothing;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
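/* For instance, in a bitwise AND widened from HImode to SImode only the
   low 16 bits of the result are examined after truncation, so whatever the
   upper bits of the widened operands contain is irrelevant; a right shift,
   by contrast, moves high-order bits down into the part that is kept, so
   its operands must really be extended.  */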
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by the tree code,
   CODE and the tree EXP.  This function is not always usable (for example, it
   cannot give complete results for multiplication or division) but probably
   ought to be relied on more widely throughout the expander.  */
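/* For example, optab_for_tree_code (MIN_EXPR, type, optab_default) yields
   umin_optab when TYPE is unsigned and smin_optab otherwise, as the switch
   below spells out.  */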
optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;

  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? NULL : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from the input operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from the input operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from the output operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:

                                 nops    OP0     OP1     WIDE_OP
   widening-sum                  2       oprnd0  -       oprnd1
   widening-dot-product          3       oprnd0  oprnd1  oprnd2
   widening-mult                 2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1       oprnd0  -       -  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (sepops ops, rtx target)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (ops->type);
  tree vec_oprnd = ops->op0;
  tree shift_oprnd = ops->op1;
  optab shift_optab;

  switch (ops->code)
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (shift_optab, mode);
  gcc_assert (icode != CODE_FOR_nothing);

  rtx_op1 = expand_normal (vec_oprnd);
  rtx_op2 = expand_normal (shift_oprnd);

  create_output_operand (&eops[0], target, mode);
  create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
  create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
  expand_insn (icode, 3, eops);

  return eops[0].value;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
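/* For example, broadcasting the constant 7 into V4SImode yields the
   CONST_VECTOR {7, 7, 7, 7}, while broadcasting a register falls back on
   the target's vec_init pattern, if it has one.  */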
static rtx
expand_vector_broadcast (enum machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
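      /* For example, with BITS_PER_WORD == 32 and OP1 == 5 we need a
         27-bit shift of OUTOF_INPUT; the single shift below contributes
         one bit and the second shift the remaining 26, where 26 is either
         (~5 & 31) or (31 - 5) depending on which branch is taken.  */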
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
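/* As an illustration, on a 32-bit target a 64-bit left shift by a variable
   count N is synthesized roughly as

       if (N < 32)
         { high = (high << N) | (low >> (32 - N)); low = low << N; }
       else
         { high = low << (N - 32); low = 0; }

   with the comparison resolved at compile time when possible, otherwise
   by conditional moves or, as a last resort, by branches.  */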
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 1.  */
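/* A small worked example of the adjustment: with BITS_PER_WORD == 8,
   op0_low == 0x90 and op1_low == 0x05, the desired unsigned product of the
   low words is 0x90 * 0x05 == 0x2d0.  A signed widening multiply instead
   computes (0x90 - 0x100) * 0x05 == -0x230 == 0xfdd0 (mod 2**16); adding
   op1_low * 2**BITS_PER_WORD == 0x500 gives 0x102d0, which is 0x2d0
   modulo 2**16, i.e. the desired result.  */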
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (product == NULL_RTX)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (binoptab->code)
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */

static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  enum machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  enum machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    {
      rtx tmp = xop0;
      xop0 = xop1;
      xop1 = tmp;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      rtx tmp = xop1;
      xop1 = xop0;
      xop0 = tmp;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value, binoptab->code,
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
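/* For example, a caller that wants a TImode addition and is willing to
   fall back on a library call could write

       rtx sum = expand_binop (TImode, add_optab, x, y,
                               target, 1, OPTAB_LIB_WIDEN);

   where X and Y are TImode rtxes; the rtx returned may or may not be
   TARGET.  */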
1488 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1489 rtx target
, int unsignedp
, enum optab_methods methods
)
1491 enum optab_methods next_methods
1492 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1493 ? OPTAB_WIDEN
: methods
);
1494 enum mode_class mclass
;
1495 enum machine_mode wider_mode
;
1498 rtx entry_last
= get_last_insn ();
1501 mclass
= GET_MODE_CLASS (mode
);
1503 /* If subtracting an integer constant, convert this into an addition of
1504 the negated constant. */
1506 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1508 op1
= negate_rtx (mode
, op1
);
1509 binoptab
= add_optab
;
1512 /* Record where to delete back to if we backtrack. */
1513 last
= get_last_insn ();
1515 /* If we can do it with a three-operand insn, do so. */
1517 if (methods
!= OPTAB_MUST_WIDEN
1518 && find_widening_optab_handler (binoptab
, mode
,
1519 widened_mode (mode
, op0
, op1
), 1)
1520 != CODE_FOR_nothing
)
1522 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1523 unsignedp
, methods
, last
);
1528 /* If we were trying to rotate, and that didn't work, try rotating
1529 the other direction before falling back to shifts and bitwise-or. */
1530 if (((binoptab
== rotl_optab
1531 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1532 || (binoptab
== rotr_optab
1533 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1534 && mclass
== MODE_INT
)
1536 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1538 unsigned int bits
= GET_MODE_PRECISION (mode
);
1540 if (CONST_INT_P (op1
))
1541 newop1
= GEN_INT (bits
- INTVAL (op1
));
1542 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1543 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1545 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1546 GEN_INT (bits
), op1
,
1547 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1549 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1550 target
, unsignedp
, methods
, last
);
1555 /* If this is a multiply, see if we can do a widening operation that
1556 takes operands of this mode and makes a wider mode. */
1558 if (binoptab
== smul_optab
1559 && GET_MODE_2XWIDER_MODE (mode
) != VOIDmode
1560 && (widening_optab_handler ((unsignedp
? umul_widen_optab
1561 : smul_widen_optab
),
1562 GET_MODE_2XWIDER_MODE (mode
), mode
)
1563 != CODE_FOR_nothing
))
1565 temp
= expand_binop (GET_MODE_2XWIDER_MODE (mode
),
1566 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1567 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1571 if (GET_MODE_CLASS (mode
) == MODE_INT
1572 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (temp
)))
1573 return gen_lowpart (mode
, temp
);
1575 return convert_to_mode (mode
, temp
, unsignedp
);
1579 /* If this is a vector shift by a scalar, see if we can do a vector
1580 shift by a vector. If so, broadcast the scalar into a vector. */
1581 if (mclass
== MODE_VECTOR_INT
)
1583 optab otheroptab
= NULL
;
1585 if (binoptab
== ashl_optab
)
1586 otheroptab
= vashl_optab
;
1587 else if (binoptab
== ashr_optab
)
1588 otheroptab
= vashr_optab
;
1589 else if (binoptab
== lshr_optab
)
1590 otheroptab
= vlshr_optab
;
1591 else if (binoptab
== rotl_optab
)
1592 otheroptab
= vrotl_optab
;
1593 else if (binoptab
== rotr_optab
)
1594 otheroptab
= vrotr_optab
;
1596 if (otheroptab
&& optab_handler (otheroptab
, mode
) != CODE_FOR_nothing
)
1598 rtx vop1
= expand_vector_broadcast (mode
, op1
);
1601 temp
= expand_binop_directly (mode
, otheroptab
, op0
, vop1
,
1602 target
, unsignedp
, methods
, last
);
1609 /* Look for a wider mode of the same class for which we think we
1610 can open-code the operation. Check for a widening multiply at the
1611 wider mode as well. */
1613 if (CLASS_HAS_WIDER_MODES_P (mclass
)
1614 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1615 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1616 wider_mode
!= VOIDmode
;
1617 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1619 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
1620 || (binoptab
== smul_optab
1621 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1622 && (find_widening_optab_handler ((unsignedp
1624 : smul_widen_optab
),
1625 GET_MODE_WIDER_MODE (wider_mode
),
1627 != CODE_FOR_nothing
)))
1629 rtx xop0
= op0
, xop1
= op1
;
1632 /* For certain integer operations, we need not actually extend
1633 the narrow operands, as long as we will truncate
1634 the results to the same narrowness. */
1636 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1637 || binoptab
== xor_optab
1638 || binoptab
== add_optab
|| binoptab
== sub_optab
1639 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1640 && mclass
== MODE_INT
)
1643 xop0
= avoid_expensive_constant (mode
, binoptab
, 0,
1645 if (binoptab
!= ashl_optab
)
1646 xop1
= avoid_expensive_constant (mode
, binoptab
, 1,
1650 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1652 /* The second operand of a shift must always be extended. */
1653 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1654 no_extend
&& binoptab
!= ashl_optab
);
1656 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1657 unsignedp
, OPTAB_DIRECT
);
1660 if (mclass
!= MODE_INT
1661 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1664 target
= gen_reg_rtx (mode
);
1665 convert_move (target
, temp
, 0);
1669 return gen_lowpart (mode
, temp
);
1672 delete_insns_since (last
);
1676 /* If operation is commutative,
1677 try to make the first operand a register.
1678 Even better, try to make it the same as the target.
1679 Also try to make the last operand a constant. */
1680 if (commutative_optab_p (binoptab
)
1681 && swap_commutative_operands_with_target (target
, op0
, op1
))
1688 /* These can be done a word at a time. */
1689 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1690 && mclass
== MODE_INT
1691 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1692 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1697 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1698 won't be accurate, so use a new target. */
1702 || !valid_multiword_target_p (target
))
1703 target
= gen_reg_rtx (mode
);
1707 /* Do the actual arithmetic. */
1708 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1710 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1711 rtx x
= expand_binop (word_mode
, binoptab
,
1712 operand_subword_force (op0
, i
, mode
),
1713 operand_subword_force (op1
, i
, mode
),
1714 target_piece
, unsignedp
, next_methods
);
1719 if (target_piece
!= x
)
1720 emit_move_insn (target_piece
, x
);
1723 insns
= get_insns ();
1726 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1733 /* Synthesize double word shifts from single word shifts. */
1734 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1735 || binoptab
== ashr_optab
)
1736 && mclass
== MODE_INT
1737 && (CONST_INT_P (op1
) || optimize_insn_for_speed_p ())
1738 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1739 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
)
1740 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
1741 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1742 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1744 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1745 enum machine_mode op1_mode
;
1747 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1748 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1749 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1751 /* Apply the truncation to constant shifts. */
1752 if (double_shift_mask
> 0 && CONST_INT_P (op1
))
1753 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1755 if (op1
== CONST0_RTX (op1_mode
))
1758 /* Make sure that this is a combination that expand_doubleword_shift
1759 can handle. See the comments there for details. */
1760 if (double_shift_mask
== 0
1761 || (shift_mask
== BITS_PER_WORD
- 1
1762 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1765 rtx into_target
, outof_target
;
1766 rtx into_input
, outof_input
;
1767 int left_shift
, outof_word
;
1769 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1770 won't be accurate, so use a new target. */
1774 || !valid_multiword_target_p (target
))
1775 target
= gen_reg_rtx (mode
);
1779 /* OUTOF_* is the word we are shifting bits away from, and
1780 INTO_* is the word that we are shifting bits towards, thus
1781 they differ depending on the direction of the shift and
1782 WORDS_BIG_ENDIAN. */
1784 left_shift
= binoptab
== ashl_optab
;
1785 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1787 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1788 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1790 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1791 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1793 if (expand_doubleword_shift (op1_mode
, binoptab
,
1794 outof_input
, into_input
, op1
,
1795 outof_target
, into_target
,
1796 unsignedp
, next_methods
, shift_mask
))
1798 insns
= get_insns ();
1808 /* Synthesize double word rotates from single word shifts. */
1809 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1810 && mclass
== MODE_INT
1811 && CONST_INT_P (op1
)
1812 && GET_MODE_PRECISION (mode
) == 2 * BITS_PER_WORD
1813 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1814 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1817 rtx into_target
, outof_target
;
1818 rtx into_input
, outof_input
;
1820 int shift_count
, left_shift
, outof_word
;
1822 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1823 won't be accurate, so use a new target. Do this also if target is not
1824 a REG, first because having a register instead may open optimization
1825 opportunities, and second because if target and op0 happen to be MEMs
1826 designating the same location, we would risk clobbering it too early
1827 in the code sequence we generate below. */
1832 || !valid_multiword_target_p (target
))
1833 target
= gen_reg_rtx (mode
);
1837 shift_count
= INTVAL (op1
);
1839 /* OUTOF_* is the word we are shifting bits away from, and
1840 INTO_* is the word that we are shifting bits towards, thus
1841 they differ depending on the direction of the shift and
1842 WORDS_BIG_ENDIAN. */
1844 left_shift
= (binoptab
== rotl_optab
);
1845 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1847 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1848 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1850 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1851 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1853 if (shift_count
== BITS_PER_WORD
)
1855 /* This is just a word swap. */
1856 emit_move_insn (outof_target
, into_input
);
1857 emit_move_insn (into_target
, outof_input
);
1862 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1863 rtx first_shift_count
, second_shift_count
;
1864 optab reverse_unsigned_shift
, unsigned_shift
;
1866 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1867 ? lshr_optab
: ashl_optab
);
1869 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1870 ? ashl_optab
: lshr_optab
);
1872 if (shift_count
> BITS_PER_WORD
)
1874 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1875 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1879 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1880 second_shift_count
= GEN_INT (shift_count
);
1883 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1884 outof_input
, first_shift_count
,
1885 NULL_RTX
, unsignedp
, next_methods
);
1886 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1887 into_input
, second_shift_count
,
1888 NULL_RTX
, unsignedp
, next_methods
);
1890 if (into_temp1
!= 0 && into_temp2
!= 0)
1891 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1892 into_target
, unsignedp
, next_methods
);
1896 if (inter
!= 0 && inter
!= into_target
)
1897 emit_move_insn (into_target
, inter
);
1899 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1900 into_input
, first_shift_count
,
1901 NULL_RTX
, unsignedp
, next_methods
);
1902 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1903 outof_input
, second_shift_count
,
1904 NULL_RTX
, unsignedp
, next_methods
);
1906 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1907 inter
= expand_binop (word_mode
, ior_optab
,
1908 outof_temp1
, outof_temp2
,
1909 outof_target
, unsignedp
, next_methods
);
1911 if (inter
!= 0 && inter
!= outof_target
)
1912 emit_move_insn (outof_target
, inter
);
1915 insns
= get_insns ();
1925 /* These can be done a word at a time by propagating carries. */
1926 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1927 && mclass
== MODE_INT
1928 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1929 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1932 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1933 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1934 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1935 rtx xop0
, xop1
, xtarget
;
1937 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1938 value is one of those, use it. Otherwise, use 1 since it is the
1939 one easiest to get. */
1940 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1941 int normalizep
= STORE_FLAG_VALUE
;
1946 /* Prepare the operands. */
1947 xop0
= force_reg (mode
, op0
);
1948 xop1
= force_reg (mode
, op1
);
1950 xtarget
= gen_reg_rtx (mode
);
1952 if (target
== 0 || !REG_P (target
) || !valid_multiword_target_p (target
))
1955 /* Indicate for flow that the entire target reg is being set. */
1957 emit_clobber (xtarget
);
1959 /* Do the actual arithmetic. */
1960 for (i
= 0; i
< nwords
; i
++)
1962 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1963 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1964 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1965 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1968 /* Main add/subtract of the input operands. */
1969 x
= expand_binop (word_mode
, binoptab
,
1970 op0_piece
, op1_piece
,
1971 target_piece
, unsignedp
, next_methods
);
1977 /* Store carry from main add/subtract. */
1978 carry_out
= gen_reg_rtx (word_mode
);
1979 carry_out
= emit_store_flag_force (carry_out
,
1980 (binoptab
== add_optab
1983 word_mode
, 1, normalizep
);
1990 /* Add/subtract previous carry to main result. */
1991 newx
= expand_binop (word_mode
,
1992 normalizep
== 1 ? binoptab
: otheroptab
,
1994 NULL_RTX
, 1, next_methods
);
1998 /* Get out carry from adding/subtracting carry in. */
1999 rtx carry_tmp
= gen_reg_rtx (word_mode
);
2000 carry_tmp
= emit_store_flag_force (carry_tmp
,
2001 (binoptab
== add_optab
2004 word_mode
, 1, normalizep
);
2006 /* Logical-ior the two poss. carry together. */
2007 carry_out
= expand_binop (word_mode
, ior_optab
,
2008 carry_out
, carry_tmp
,
2009 carry_out
, 0, next_methods
);
2013 emit_move_insn (target_piece
, newx
);
2017 if (x
!= target_piece
)
2018 emit_move_insn (target_piece
, x
);
2021 carry_in
= carry_out
;
2024 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
2026 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
2027 || ! rtx_equal_p (target
, xtarget
))
2029 rtx temp
= emit_move_insn (target
, xtarget
);
2031 set_unique_reg_note (temp
,
2033 gen_rtx_fmt_ee (binoptab
->code
, mode
,
2044 delete_insns_since (last
);
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */
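  /* Editorial sketch (not original code): the double-word product is built
     from word-mode multiplies.  Assuming 32-bit words and a 64-bit result
     (names hypothetical, disabled under #if 0), the decomposition used by
     expand_doubleword_mult is equivalent to:  */
#if 0
static unsigned long long
mul_doubleword_sketch (unsigned long long x, unsigned long long y)
{
  unsigned int x_lo = (unsigned int) x, x_hi = (unsigned int) (x >> 32);
  unsigned int y_lo = (unsigned int) y, y_hi = (unsigned int) (y >> 32);

  /* One widening multiply of the low words ...  */
  unsigned long long prod = (unsigned long long) x_lo * y_lo;
  /* ... plus the two cross products, which only affect the high word.  */
  unsigned int cross = x_hi * y_lo + x_lo * y_hi;
  return prod + ((unsigned long long) cross << 32);
}
#endif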
2052 if (binoptab
== smul_optab
2053 && mclass
== MODE_INT
2054 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2055 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
2056 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
2058 rtx product
= NULL_RTX
;
2059 if (widening_optab_handler (umul_widen_optab
, mode
, word_mode
)
2060 != CODE_FOR_nothing
)
2062 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2065 delete_insns_since (last
);
2068 if (product
== NULL_RTX
2069 && widening_optab_handler (smul_widen_optab
, mode
, word_mode
)
2070 != CODE_FOR_nothing
)
2072 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
2075 delete_insns_since (last
);
2078 if (product
!= NULL_RTX
)
2080 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
)
2082 temp
= emit_move_insn (target
? target
: product
, product
);
2083 set_unique_reg_note (temp
,
2085 gen_rtx_fmt_ee (MULT
, mode
,
2093 /* It can't be open-coded in this mode.
2094 Use a library call if one is available and caller says that's ok. */
2096 libfunc
= optab_libfunc (binoptab
, mode
);
2098 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
2102 enum machine_mode op1_mode
= mode
;
2107 if (shift_optab_p (binoptab
))
2109 op1_mode
= targetm
.libgcc_shift_count_mode ();
2110 /* Specify unsigned here,
2111 since negative shift counts are meaningless. */
2112 op1x
= convert_to_mode (op1_mode
, op1
, 1);
2115 if (GET_MODE (op0
) != VOIDmode
2116 && GET_MODE (op0
) != mode
)
2117 op0
= convert_to_mode (mode
, op0
, unsignedp
);
2119 /* Pass 1 for NO_QUEUE so we don't lose any increments
2120 if the libcall is cse'd or moved. */
2121 value
= emit_library_call_value (libfunc
,
2122 NULL_RTX
, LCT_CONST
, mode
, 2,
2123 op0
, mode
, op1x
, op1_mode
);
2125 insns
= get_insns ();
2128 target
= gen_reg_rtx (mode
);
2129 emit_libcall_block (insns
, target
, value
,
2130 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
2135 delete_insns_since (last
);
2137 /* It can't be done in this mode. Can we do it in a wider mode? */
2139 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
2140 || methods
== OPTAB_MUST_WIDEN
))
2142 /* Caller says, don't even try. */
2143 delete_insns_since (entry_last
);
2147 /* Compute the value of METHODS to pass to recursive calls.
2148 Don't allow widening to be tried recursively. */
2150 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
2152 /* Look for a wider mode of the same class for which it appears we can do
2155 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2157 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2158 wider_mode
!= VOIDmode
;
2159 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2161 if (find_widening_optab_handler (binoptab
, wider_mode
, mode
, 1)
2163 || (methods
== OPTAB_LIB
2164 && optab_libfunc (binoptab
, wider_mode
)))
2166 rtx xop0
= op0
, xop1
= op1
;
2169 /* For certain integer operations, we need not actually extend
2170 the narrow operands, as long as we will truncate
2171 the results to the same narrowness. */
2173 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2174 || binoptab
== xor_optab
2175 || binoptab
== add_optab
|| binoptab
== sub_optab
2176 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2177 && mclass
== MODE_INT
)
2180 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2181 unsignedp
, no_extend
);
2183 /* The second operand of a shift must always be extended. */
2184 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2185 no_extend
&& binoptab
!= ashl_optab
);
2187 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2188 unsignedp
, methods
);
2191 if (mclass
!= MODE_INT
2192 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2195 target
= gen_reg_rtx (mode
);
2196 convert_move (target
, temp
, 0);
2200 return gen_lowpart (mode
, temp
);
2203 delete_insns_since (last
);
2208 delete_insns_since (entry_last
);
2212 /* Expand a binary operator which has both signed and unsigned forms.
2213 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2216 If we widen unsigned operands, we may use a signed wider operation instead
2217 of an unsigned wider operation, since the result would be the same. */
2220 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2221 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2222 enum optab_methods methods
)
2225 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2226 struct optab_d wide_soptab
;
2228 /* Do it without widening, if possible. */
2229 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2230 unsignedp
, OPTAB_DIRECT
);
2231 if (temp
|| methods
== OPTAB_DIRECT
)
2234 /* Try widening to a signed int. Make a fake signed optab that
2235 hides any signed insn for direct use. */
2236 wide_soptab
= *soptab
;
2237 set_optab_handler (&wide_soptab
, mode
, CODE_FOR_nothing
);
2238 /* We don't want to generate new hash table entries from this fake
2240 wide_soptab
.libcall_gen
= NULL
;
2242 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2243 unsignedp
, OPTAB_WIDEN
);
2245 /* For unsigned operands, try widening to an unsigned int. */
2246 if (temp
== 0 && unsignedp
)
2247 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2248 unsignedp
, OPTAB_WIDEN
);
2249 if (temp
|| methods
== OPTAB_WIDEN
)
2252 /* Use the right width libcall if that exists. */
2253 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2254 if (temp
|| methods
== OPTAB_LIB
)
2257 /* Must widen and use a libcall, use either signed or unsigned. */
2258 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2259 unsignedp
, methods
);
2263 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2264 unsignedp
, methods
);
2268 /* Generate code to perform an operation specified by UNOPPTAB
2269 on operand OP0, with two results to TARG0 and TARG1.
2270 We assume that the order of the operands for the instruction
2271 is TARG0, TARG1, OP0.
2273 Either TARG0 or TARG1 may be zero, but what that means is that
2274 the result is not actually wanted. We will generate it into
2275 a dummy pseudo-reg and discard it. They may not both be zero.
2277 Returns 1 if this operation can be performed; 0 if not. */
2280 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2283 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2284 enum mode_class mclass
;
2285 enum machine_mode wider_mode
;
2286 rtx entry_last
= get_last_insn ();
2289 mclass
= GET_MODE_CLASS (mode
);
2292 targ0
= gen_reg_rtx (mode
);
2294 targ1
= gen_reg_rtx (mode
);
2296 /* Record where to go back to if we fail. */
2297 last
= get_last_insn ();
2299 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2301 struct expand_operand ops
[3];
2302 enum insn_code icode
= optab_handler (unoptab
, mode
);
2304 create_fixed_operand (&ops
[0], targ0
);
2305 create_fixed_operand (&ops
[1], targ1
);
2306 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
2307 if (maybe_expand_insn (icode
, 3, ops
))
2311 /* It can't be done in this mode. Can we do it in a wider mode? */
2313 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2315 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2316 wider_mode
!= VOIDmode
;
2317 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2319 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2321 rtx t0
= gen_reg_rtx (wider_mode
);
2322 rtx t1
= gen_reg_rtx (wider_mode
);
2323 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2325 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2327 convert_move (targ0
, t0
, unsignedp
);
2328 convert_move (targ1
, t1
, unsignedp
);
2332 delete_insns_since (last
);
2337 delete_insns_since (entry_last
);
2341 /* Generate code to perform an operation specified by BINOPTAB
2342 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2343 We assume that the order of the operands for the instruction
2344 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2345 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2347 Either TARG0 or TARG1 may be zero, but what that means is that
2348 the result is not actually wanted. We will generate it into
2349 a dummy pseudo-reg and discard it. They may not both be zero.
2351 Returns 1 if this operation can be performed; 0 if not. */
2354 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2357 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2358 enum mode_class mclass
;
2359 enum machine_mode wider_mode
;
2360 rtx entry_last
= get_last_insn ();
2363 mclass
= GET_MODE_CLASS (mode
);
2366 targ0
= gen_reg_rtx (mode
);
2368 targ1
= gen_reg_rtx (mode
);
2370 /* Record where to go back to if we fail. */
2371 last
= get_last_insn ();
2373 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2375 struct expand_operand ops
[4];
2376 enum insn_code icode
= optab_handler (binoptab
, mode
);
2377 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2378 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2379 rtx xop0
= op0
, xop1
= op1
;
2381 /* If we are optimizing, force expensive constants into a register. */
2382 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2383 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2385 create_fixed_operand (&ops
[0], targ0
);
2386 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2387 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
2388 create_fixed_operand (&ops
[3], targ1
);
2389 if (maybe_expand_insn (icode
, 4, ops
))
2391 delete_insns_since (last
);
2394 /* It can't be done in this mode. Can we do it in a wider mode? */
2396 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2398 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2399 wider_mode
!= VOIDmode
;
2400 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2402 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2404 rtx t0
= gen_reg_rtx (wider_mode
);
2405 rtx t1
= gen_reg_rtx (wider_mode
);
2406 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2407 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2409 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2412 convert_move (targ0
, t0
, unsignedp
);
2413 convert_move (targ1
, t1
, unsignedp
);
2417 delete_insns_since (last
);
2422 delete_insns_since (entry_last
);
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
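/* Editorial sketch (not original code): the function below treats the
   libcall result as a single integer twice as wide as MODE and picks out
   one half with a subreg.  For SImode operands the equivalent C, ignoring
   the endianness details that the real code handles through the subreg
   byte offset (names hypothetical, disabled under #if 0), is roughly:  */
#if 0
static void
split_twoval_result_sketch (unsigned long long libval,
                            unsigned int *first, unsigned int *second)
{
  *first  = (unsigned int) libval;          /* one half of the result */
  *second = (unsigned int) (libval >> 32);  /* the other half */
}
#endif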
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
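/* Editorial sketch (not original code): for clz the adjustment above can be
   checked in plain C.  It assumes 16-bit shorts, 32-bit ints and GCC's
   __builtin_clz, all for illustration only (disabled under #if 0):  */
#if 0
static int
clz16_via_clz32_sketch (unsigned short x)
{
  /* Zero-extend to the wider mode, count there, then subtract the extra
     leading zeros contributed by the widening (32 - 16).  Like the real
     expansion, this is only meaningful for x != 0.  */
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}
#endif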
static rtx
widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode,
                                    unoptab != clrsb_optab, false);
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unoptab != clrsb_optab);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_PRECISION (wider_mode)
                                              - GET_MODE_PRECISION (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
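/* Editorial sketch (not original code): the same branch-and-count scheme in
   plain C, assuming 32-bit words, a 64-bit operand and GCC's __builtin_clz
   (illustrative names only, disabled under #if 0):  */
#if 0
static int
clz64_sketch (unsigned long long x)
{
  unsigned int hi = (unsigned int) (x >> 32);
  unsigned int lo = (unsigned int) x;

  if (hi != 0)
    return __builtin_clz (hi);          /* clz of the high word */
  return 32 + __builtin_clz (lo);       /* word size + clz of the low word */
}
#endif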
static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx hi0_label = gen_label_rtx ();
  rtx after_label = gen_label_rtx ();
  rtx seq, temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
                       GEN_INT (GET_MODE_BITSIZE (word_mode)),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating
        (bswap:narrow x)
   as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
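/* Editorial sketch (not original code): the narrowing shift above, written
   in plain C for a 16-bit swap done through a 32-bit bswap.  It assumes
   16-bit shorts, 32-bit ints and GCC's __builtin_bswap32, for illustration
   only (disabled under #if 0):  */
#if 0
static unsigned short
bswap16_via_bswap32_sketch (unsigned short x)
{
  /* __builtin_bswap32 (0x0000ABCD) == 0xCDAB0000; shifting right by
     32 - 16 bits brings the swapped narrow value back to the low half.  */
  return (unsigned short) (__builtin_bswap32 (x) >> 16);
}
#endif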
static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx x;
  rtx last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */
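/* Editorial sketch (not original code): byte-swapping a double-word value
   is byte-swapping each word and exchanging the two words.  The plain-C
   version below assumes 32-bit words and GCC's __builtin_bswap32, for
   illustration only (disabled under #if 0):  */
#if 0
static unsigned long long
bswap64_sketch (unsigned long long x)
{
  unsigned int lo = (unsigned int) x;
  unsigned int hi = (unsigned int) (x >> 32);

  /* The swapped low word becomes the new high word and vice versa.  */
  return ((unsigned long long) __builtin_bswap32 (lo) << 32)
         | __builtin_bswap32 (hi);
}
#endif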
static rtx
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
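/* Editorial sketch (not original code): the identity used here, in plain C
   with GCC's __builtin_popcount assumed for illustration (disabled under
   #if 0):  */
#if 0
static int
parity32_sketch (unsigned int x)
{
  /* Parity is simply the low bit of the population count.  */
  return __builtin_popcount (x) & 1;
}
#endif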
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
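/* Editorial sketch (not original code): the identity above in plain C,
   assuming a 32-bit int and GCC's __builtin_clz, for illustration only
   (disabled under #if 0):  */
#if 0
static int
ctz32_via_clz_sketch (unsigned int x)
{
  /* x & -x isolates the lowest set bit; its position from the left is
     clz, so its position from the right is (32 - 1) - clz.  Only
     meaningful for x != 0, matching the builtin's contract.  */
  return 31 - __builtin_clz (x & -x);
}
#endif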
static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx seq, temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         GEN_INT (GET_MODE_PRECISION (mode) - 1),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
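/* Editorial sketch (not original code): when ctz's value at zero is not
   usable, the expansion below amounts to the following plain C, assuming a
   32-bit int and GCC's __builtin_ctz (disabled under #if 0).  The real
   expansion computes ctz first and patches the x == 0 case with a
   test-and-branch afterwards; written as straight C we test first to avoid
   the undefined __builtin_ctz (0).  */
#if 0
static int
ffs32_sketch (unsigned int x)
{
  if (x == 0)
    return 0;                        /* ffs promises 0 at zero */
  return __builtin_ctz (x) + 1;      /* ffs is 1-based */
}
#endif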
static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp, seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
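/* Editorial sketch (not original code): for IEEE single precision the sign
   bit is bit 31, so negation and absolute value reduce to one XOR or AND on
   the integer image of the float.  This assumes a 32-bit unsigned int and
   memcpy-based type punning (memcpy is already available through this
   file's system.h include); illustration only, disabled under #if 0.  */
#if 0
static float
fneg_via_xor_sketch (float x)
{
  unsigned int bits;
  memcpy (&bits, &x, sizeof bits);
  bits ^= 0x80000000u;              /* NEG: flip the sign bit.  */
  /* ABS would instead use:  bits &= 0x7fffffffu;  */
  memcpy (&x, &bits, sizeof bits);
  return x;
}
#endif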
2859 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2860 rtx op0
, rtx target
)
2862 const struct real_format
*fmt
;
2863 int bitpos
, word
, nwords
, i
;
2864 enum machine_mode imode
;
2868 /* The format has to have a simple sign bit. */
2869 fmt
= REAL_MODE_FORMAT (mode
);
2873 bitpos
= fmt
->signbit_rw
;
2877 /* Don't create negative zeros if the format doesn't support them. */
2878 if (code
== NEG
&& !fmt
->has_signed_zero
)
2881 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2883 imode
= int_mode_for_mode (mode
);
2884 if (imode
== BLKmode
)
2893 if (FLOAT_WORDS_BIG_ENDIAN
)
2894 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2896 word
= bitpos
/ BITS_PER_WORD
;
2897 bitpos
= bitpos
% BITS_PER_WORD
;
2898 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2901 mask
= double_int_setbit (double_int_zero
, bitpos
);
2903 mask
= double_int_not (mask
);
2907 || (nwords
> 1 && !valid_multiword_target_p (target
)))
2908 target
= gen_reg_rtx (mode
);
2914 for (i
= 0; i
< nwords
; ++i
)
2916 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2917 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2921 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2923 immed_double_int_const (mask
, imode
),
2924 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2925 if (temp
!= targ_piece
)
2926 emit_move_insn (targ_piece
, temp
);
2929 emit_move_insn (targ_piece
, op0_piece
);
2932 insns
= get_insns ();
2939 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2940 gen_lowpart (imode
, op0
),
2941 immed_double_int_const (mask
, imode
),
2942 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2943 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2945 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2946 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2952 /* As expand_unop, but will fail rather than attempt the operation in a
2953 different mode or with a libcall. */
2955 expand_unop_direct (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2958 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2960 struct expand_operand ops
[2];
2961 enum insn_code icode
= optab_handler (unoptab
, mode
);
2962 rtx last
= get_last_insn ();
2965 create_output_operand (&ops
[0], target
, mode
);
2966 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2967 pat
= maybe_gen_insn (icode
, 2, ops
);
2970 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2971 && ! add_equal_note (pat
, ops
[0].value
, unoptab
->code
,
2972 ops
[1].value
, NULL_RTX
))
2974 delete_insns_since (last
);
2975 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2980 return ops
[0].value
;
2986 /* Generate code to perform an operation specified by UNOPTAB
2987 on operand OP0, with result having machine-mode MODE.
2989 UNSIGNEDP is for the case where we have to widen the operands
2990 to perform the operation. It says to use zero-extension.
2992 If TARGET is nonzero, the value
2993 is generated there, if it is convenient to do so.
2994 In all cases an rtx is returned for the locus of the value;
2995 this may or may not be TARGET. */
2998 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3001 enum mode_class mclass
= GET_MODE_CLASS (mode
);
3002 enum machine_mode wider_mode
;
3006 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3010 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3012 /* Widening (or narrowing) clz needs special treatment. */
3013 if (unoptab
== clz_optab
)
3015 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3019 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3020 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3022 temp
= expand_doubleword_clz (mode
, op0
, target
);
3030 if (unoptab
== clrsb_optab
)
3032 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3038 /* Widening (or narrowing) bswap needs special treatment. */
3039 if (unoptab
== bswap_optab
)
3041 temp
= widen_bswap (mode
, op0
, target
);
3045 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3046 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3048 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3056 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3057 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3058 wider_mode
!= VOIDmode
;
3059 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3061 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
3064 rtx last
= get_last_insn ();
3066 /* For certain operations, we need not actually extend
3067 the narrow operand, as long as we will truncate the
3068 results to the same narrowness. */
3070 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3071 (unoptab
== neg_optab
3072 || unoptab
== one_cmpl_optab
)
3073 && mclass
== MODE_INT
);
3075 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3080 if (mclass
!= MODE_INT
3081 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
3084 target
= gen_reg_rtx (mode
);
3085 convert_move (target
, temp
, 0);
3089 return gen_lowpart (mode
, temp
);
3092 delete_insns_since (last
);
3096 /* These can be done a word at a time. */
3097 if (unoptab
== one_cmpl_optab
3098 && mclass
== MODE_INT
3099 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3100 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3105 if (target
== 0 || target
== op0
|| !valid_multiword_target_p (target
))
3106 target
= gen_reg_rtx (mode
);
3110 /* Do the actual arithmetic. */
3111 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
3113 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
3114 rtx x
= expand_unop (word_mode
, unoptab
,
3115 operand_subword_force (op0
, i
, mode
),
3116 target_piece
, unsignedp
);
3118 if (target_piece
!= x
)
3119 emit_move_insn (target_piece
, x
);
3122 insns
= get_insns ();
3129 if (unoptab
->code
== NEG
)
3131 /* Try negating floating point values by flipping the sign bit. */
3132 if (SCALAR_FLOAT_MODE_P (mode
))
3134 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
3139 /* If there is no negation pattern, and we have no negative zero,
3140 try subtracting from zero. */
3141 if (!HONOR_SIGNED_ZEROS (mode
))
3143 temp
= expand_binop (mode
, (unoptab
== negv_optab
3144 ? subv_optab
: sub_optab
),
3145 CONST0_RTX (mode
), op0
, target
,
3146 unsignedp
, OPTAB_DIRECT
);
3152 /* Try calculating parity (x) as popcount (x) % 2. */
3153 if (unoptab
== parity_optab
)
3155 temp
= expand_parity (mode
, op0
, target
);
3160 /* Try implementing ffs (x) in terms of clz (x). */
3161 if (unoptab
== ffs_optab
)
3163 temp
= expand_ffs (mode
, op0
, target
);
3168 /* Try implementing ctz (x) in terms of clz (x). */
3169 if (unoptab
== ctz_optab
)
3171 temp
= expand_ctz (mode
, op0
, target
);
3177 /* Now try a library call in this mode. */
3178 libfunc
= optab_libfunc (unoptab
, mode
);
3184 enum machine_mode outmode
= mode
;
3186 /* All of these functions return small values. Thus we choose to
3187 have them return something that isn't a double-word. */
3188 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3189 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
3190 || unoptab
== parity_optab
)
3192 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
3193 optab_libfunc (unoptab
, mode
)));
3197 /* Pass 1 for NO_QUEUE so we don't lose any increments
3198 if the libcall is cse'd or moved. */
3199 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3201 insns
= get_insns ();
3204 target
= gen_reg_rtx (outmode
);
3205 eq_value
= gen_rtx_fmt_e (unoptab
->code
, mode
, op0
);
3206 if (GET_MODE_SIZE (outmode
) < GET_MODE_SIZE (mode
))
3207 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
3208 else if (GET_MODE_SIZE (outmode
) > GET_MODE_SIZE (mode
))
3209 eq_value
= simplify_gen_unary (ZERO_EXTEND
, outmode
, eq_value
, mode
);
3210 emit_libcall_block (insns
, target
, value
, eq_value
);
3215 /* It can't be done in this mode. Can we do it in a wider mode? */
3217 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3219 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3220 wider_mode
!= VOIDmode
;
3221 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3223 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
3224 || optab_libfunc (unoptab
, wider_mode
))
3227 rtx last
= get_last_insn ();
3229 /* For certain operations, we need not actually extend
3230 the narrow operand, as long as we will truncate the
3231 results to the same narrowness. */
3233 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3234 (unoptab
== neg_optab
3235 || unoptab
== one_cmpl_optab
)
3236 && mclass
== MODE_INT
);
3238 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3241 /* If we are generating clz using wider mode, adjust the
3242 result. Similarly for clrsb. */
3243 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3245 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
3246 GEN_INT (GET_MODE_PRECISION (wider_mode
)
3247 - GET_MODE_PRECISION (mode
)),
3248 target
, true, OPTAB_DIRECT
);
3252 if (mclass
!= MODE_INT
)
3255 target
= gen_reg_rtx (mode
);
3256 convert_move (target
, temp
, 0);
3260 return gen_lowpart (mode
, temp
);
3263 delete_insns_since (last
);
3268 /* One final attempt at implementing negation via subtraction,
3269 this time allowing widening of the operand. */
3270 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3273 temp
= expand_binop (mode
,
3274 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3275 CONST0_RTX (mode
), op0
,
3276 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3284 /* Emit code to compute the absolute value of OP0, with result to
3285 TARGET if convenient. (TARGET may be 0.) The return value says
3286 where the result actually is to be found.
3288 MODE is the mode of the operand; the mode of the result is
3289 different but can be deduced from MODE.
3294 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
3295 int result_unsignedp
)
3300 result_unsignedp
= 1;
3302 /* First try to do it with a special abs instruction. */
3303 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3308 /* For floating point modes, try clearing the sign bit. */
3309 if (SCALAR_FLOAT_MODE_P (mode
))
3311 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3316 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3317 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3318 && !HONOR_SIGNED_ZEROS (mode
))
3320 rtx last
= get_last_insn ();
3322 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
3324 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3330 delete_insns_since (last
);
  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
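  /* Editorial sketch (not original code): the branch-free sequence above in
     plain C, assuming a 32-bit int and an arithmetic right shift of
     negative values (implementation-defined in C, but what this expansion
     relies on).  Illustration only, disabled under #if 0.  */
#if 0
static int
abs32_branchless_sketch (int x)
{
  int mask = x >> 31;           /* 0 for x >= 0, -1 for x < 0 */
  return (x ^ mask) - mask;     /* identity for x >= 0, ~x + 1 for x < 0 */
}
#endif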
3337 if (GET_MODE_CLASS (mode
) == MODE_INT
3338 && BRANCH_COST (optimize_insn_for_speed_p (),
3341 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3342 GET_MODE_PRECISION (mode
) - 1,
3345 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3348 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3349 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3359 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3360 int result_unsignedp
, int safe
)
3365 result_unsignedp
= 1;
3367 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3371 /* If that does not win, use conditional jump and negate. */
3373 /* It is safe to use the target if it is the same
3374 as the source if this is also a pseudo register */
3375 if (op0
== target
&& REG_P (op0
)
3376 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3379 op1
= gen_label_rtx ();
3380 if (target
== 0 || ! safe
3381 || GET_MODE (target
) != mode
3382 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3384 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3385 target
= gen_reg_rtx (mode
);
3387 emit_move_insn (target
, op0
);
3390 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3391 NULL_RTX
, NULL_RTX
, op1
, -1);
3393 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3396 emit_move_insn (target
, op0
);
3402 /* Emit code to compute the one's complement absolute value of OP0
3403 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3404 (TARGET may be NULL_RTX.) The return value says where the result
3405 actually is to be found.
3407 MODE is the mode of the operand; the mode of the result is
3408 different but can be deduced from MODE. */
3411 expand_one_cmpl_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
)
3415 /* Not applicable for floating point modes. */
3416 if (FLOAT_MODE_P (mode
))
3419 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3420 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3422 rtx last
= get_last_insn ();
3424 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3426 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3432 delete_insns_since (last
);
3435 /* If this machine has expensive jumps, we can do one's complement
3436 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3438 if (GET_MODE_CLASS (mode
) == MODE_INT
3439 && BRANCH_COST (optimize_insn_for_speed_p (),
3442 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3443 GET_MODE_PRECISION (mode
) - 1,
3446 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3456 /* A subroutine of expand_copysign, perform the copysign operation using the
3457 abs and neg primitives advertised to exist on the target. The assumption
3458 is that we have a split register file, and leaving op0 in fp registers,
3459 and not playing with subregs so much, will help the register allocator. */
3462 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3463 int bitpos
, bool op0_is_abs
)
3465 enum machine_mode imode
;
3466 enum insn_code icode
;
3472 /* Check if the back end provides an insn that handles signbit for the
3474 icode
= optab_handler (signbit_optab
, mode
);
3475 if (icode
!= CODE_FOR_nothing
)
3477 imode
= insn_data
[(int) icode
].operand
[0].mode
;
3478 sign
= gen_reg_rtx (imode
);
3479 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3485 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3487 imode
= int_mode_for_mode (mode
);
3488 if (imode
== BLKmode
)
3490 op1
= gen_lowpart (imode
, op1
);
3497 if (FLOAT_WORDS_BIG_ENDIAN
)
3498 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3500 word
= bitpos
/ BITS_PER_WORD
;
3501 bitpos
= bitpos
% BITS_PER_WORD
;
3502 op1
= operand_subword_force (op1
, word
, mode
);
3505 mask
= double_int_setbit (double_int_zero
, bitpos
);
3507 sign
= expand_binop (imode
, and_optab
, op1
,
3508 immed_double_int_const (mask
, imode
),
3509 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3514 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3521 if (target
== NULL_RTX
)
3522 target
= copy_to_reg (op0
);
3524 emit_move_insn (target
, op0
);
3527 label
= gen_label_rtx ();
3528 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3530 if (GET_CODE (op0
) == CONST_DOUBLE
)
3531 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3533 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3535 emit_move_insn (target
, op0
);
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
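/* Editorial sketch (not original code): for IEEE single precision the
   integer form of copysign keeps everything of OP0 except its sign bit and
   ORs in the sign bit of OP1.  Assumes a 32-bit unsigned int and
   memcpy-based type punning; illustration only, disabled under #if 0.  */
#if 0
static float
copysignf_bits_sketch (float x, float y)
{
  unsigned int xb, yb;
  memcpy (&xb, &x, sizeof xb);
  memcpy (&yb, &y, sizeof yb);
  xb = (xb & 0x7fffffffu)       /* magnitude of X */
       | (yb & 0x80000000u);    /* sign of Y */
  memcpy (&x, &xb, sizeof xb);
  return x;
}
#endif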
3548 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3549 int bitpos
, bool op0_is_abs
)
3551 enum machine_mode imode
;
3553 int word
, nwords
, i
;
3556 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3558 imode
= int_mode_for_mode (mode
);
3559 if (imode
== BLKmode
)
3568 if (FLOAT_WORDS_BIG_ENDIAN
)
3569 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3571 word
= bitpos
/ BITS_PER_WORD
;
3572 bitpos
= bitpos
% BITS_PER_WORD
;
3573 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3576 mask
= double_int_setbit (double_int_zero
, bitpos
);
3581 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3582 target
= gen_reg_rtx (mode
);
3588 for (i
= 0; i
< nwords
; ++i
)
3590 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3591 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3597 = expand_binop (imode
, and_optab
, op0_piece
,
3598 immed_double_int_const (double_int_not (mask
),
3600 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3602 op1
= expand_binop (imode
, and_optab
,
3603 operand_subword_force (op1
, i
, mode
),
3604 immed_double_int_const (mask
, imode
),
3605 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3607 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3608 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3609 if (temp
!= targ_piece
)
3610 emit_move_insn (targ_piece
, temp
);
3613 emit_move_insn (targ_piece
, op0_piece
);
3616 insns
= get_insns ();
3623 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3624 immed_double_int_const (mask
, imode
),
3625 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3627 op0
= gen_lowpart (imode
, op0
);
3629 op0
= expand_binop (imode
, and_optab
, op0
,
3630 immed_double_int_const (double_int_not (mask
),
3632 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3634 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3635 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3636 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3642 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3643 scalar floating point mode. Return NULL if we do not know how to
3644 expand the operation inline. */
3647 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3649 enum machine_mode mode
= GET_MODE (op0
);
3650 const struct real_format
*fmt
;
3654 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3655 gcc_assert (GET_MODE (op1
) == mode
);
3657 /* First try to do it with a special instruction. */
3658 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3659 target
, 0, OPTAB_DIRECT
);
3663 fmt
= REAL_MODE_FORMAT (mode
);
3664 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3668 if (GET_CODE (op0
) == CONST_DOUBLE
)
3670 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3671 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3675 if (fmt
->signbit_ro
>= 0
3676 && (GET_CODE (op0
) == CONST_DOUBLE
3677 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3678 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3680 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3681 fmt
->signbit_ro
, op0_is_abs
);
3686 if (fmt
->signbit_rw
< 0)
3688 return expand_copysign_bit (mode
, op0
, op1
, target
,
3689 fmt
->signbit_rw
, op0_is_abs
);
3692 /* Generate an instruction whose insn-code is INSN_CODE,
3693 with two operands: an output TARGET and an input OP0.
3694 TARGET *must* be nonzero, and the output is always stored there.
3695 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3696 the value that is stored into TARGET.
3698 Return false if expansion failed. */
3701 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3704 struct expand_operand ops
[2];
3707 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3708 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3709 pat
= maybe_gen_insn (icode
, 2, ops
);
3713 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3714 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
);
3718 if (ops
[0].value
!= target
)
3719 emit_move_insn (target
, ops
[0].value
);
3722 /* Generate an instruction whose insn-code is INSN_CODE,
3723 with two operands: an output TARGET and an input OP0.
3724 TARGET *must* be nonzero, and the output is always stored there.
3725 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3726 the value that is stored into TARGET. */
3729 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3731 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3735 struct no_conflict_data
3737 rtx target
, first
, insn
;
3741 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3742 the currently examined clobber / store has to stay in the list of
3743 insns that constitute the actual libcall block. */
3745 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3747 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3749 /* If this inns directly contributes to setting the target, it must stay. */
3750 if (reg_overlap_mentioned_p (p
->target
, dest
))
3751 p
->must_stay
= true;
3752 /* If we haven't committed to keeping any other insns in the list yet,
3753 there is nothing more to check. */
3754 else if (p
->insn
== p
->first
)
3756 /* If this insn sets / clobbers a register that feeds one of the insns
3757 already in the list, this insn has to stay too. */
3758 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3759 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3760 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3761 /* Likewise if this insn depends on a register set by a previous
3762 insn in the list, or if it sets a result (presumably a hard
3763 register) that is set or clobbered by a previous insn.
3764 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3765 SET_DEST perform the former check on the address, and the latter
3766 check on the MEM. */
3767 || (GET_CODE (set
) == SET
3768 && (modified_in_p (SET_SRC (set
), p
->first
)
3769 || modified_in_p (SET_DEST (set
), p
->first
)
3770 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3771 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3772 p
->must_stay
= true;
3776 /* Emit code to make a call to a constant function or a library call.
3778 INSNS is a list containing all insns emitted in the call.
3779 These insns leave the result in RESULT. Our block is to copy RESULT
3780 to TARGET, which is logically equivalent to EQUIV.
3782 We first emit any insns that set a pseudo on the assumption that these are
3783 loading constants into registers; doing so allows them to be safely cse'ed
3784 between blocks. Then we emit all the other insns in the block, followed by
3785 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3786 note with an operand of EQUIV. */
3789 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3791 rtx final_dest
= target
;
3792 rtx next
, last
, insn
;
3794 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3795 into a MEM later. Protect the libcall block from this change. */
3796 if (! REG_P (target
) || REG_USERVAR_P (target
))
3797 target
= gen_reg_rtx (GET_MODE (target
));
3799 /* If we're using non-call exceptions, a libcall corresponding to an
3800 operation that may trap may also trap. */
3801 /* ??? See the comment in front of make_reg_eh_region_note. */
3802 if (cfun
->can_throw_non_call_exceptions
&& may_trap_p (equiv
))
3804 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3807 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3810 int lp_nr
= INTVAL (XEXP (note
, 0));
3811 if (lp_nr
== 0 || lp_nr
== INT_MIN
)
3812 remove_note (insn
, note
);
3818 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3819 reg note to indicate that this call cannot throw or execute a nonlocal
3820 goto (unless there is already a REG_EH_REGION note, in which case
3822 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3824 make_reg_eh_region_note_nothrow_nononlocal (insn
);
3827 /* First emit all insns that set pseudos. Remove them from the list as
3828 we go. Avoid insns that set pseudos which were referenced in previous
3829 insns. These can be generated by move_by_pieces, for example,
3830 to update an address. Similarly, avoid insns that reference things
3831 set in previous insns. */
3833 for (insn
= insns
; insn
; insn
= next
)
3835 rtx set
= single_set (insn
);
3837 next
= NEXT_INSN (insn
);
3839 if (set
!= 0 && REG_P (SET_DEST (set
))
3840 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3842 struct no_conflict_data data
;
3844 data
.target
= const0_rtx
;
3848 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3849 if (! data
.must_stay
)
3851 if (PREV_INSN (insn
))
3852 NEXT_INSN (PREV_INSN (insn
)) = next
;
3857 PREV_INSN (next
) = PREV_INSN (insn
);
3863 /* Some ports use a loop to copy large arguments onto the stack.
3864 Don't move anything outside such a loop. */
3869 /* Write the remaining insns followed by the final copy. */
3870 for (insn
= insns
; insn
; insn
= next
)
3872 next
= NEXT_INSN (insn
);
3877 last
= emit_move_insn (target
, result
);
3878 if (optab_handler (mov_optab
, GET_MODE (target
)) != CODE_FOR_nothing
)
3879 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3881 if (final_dest
!= target
)
3882 emit_move_insn (final_dest
, target
);
3885 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3886 PURPOSE describes how this comparison will be used. CODE is the rtx
3887 comparison code we will be using.
3889 ??? Actually, CODE is slightly weaker than that. A target is still
3890 required to implement all of the normal bcc operations, but not
3891 required to implement all (or any) of the unordered bcc operations. */
3894 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3895 enum can_compare_purpose purpose
)
3898 test
= gen_rtx_fmt_ee (code
, mode
, const0_rtx
, const0_rtx
);
3901 enum insn_code icode
;
3903 if (purpose
== ccp_jump
3904 && (icode
= optab_handler (cbranch_optab
, mode
)) != CODE_FOR_nothing
3905 && insn_operand_matches (icode
, 0, test
))
3907 if (purpose
== ccp_store_flag
3908 && (icode
= optab_handler (cstore_optab
, mode
)) != CODE_FOR_nothing
3909 && insn_operand_matches (icode
, 1, test
))
3911 if (purpose
== ccp_cmov
3912 && optab_handler (cmov_optab
, mode
) != CODE_FOR_nothing
)
3915 mode
= GET_MODE_WIDER_MODE (mode
);
3916 PUT_MODE (test
, mode
);
3918 while (mode
!= VOIDmode
);
3923 /* This function is called when we are going to emit a compare instruction that
3924 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3926 *PMODE is the mode of the inputs (in case they are const_int).
3927 *PUNSIGNEDP nonzero says that the operands are unsigned;
3928 this matters if they need to be widened (as given by METHODS).
3930 If they have mode BLKmode, then SIZE specifies the size of both operands.
3932 This function performs all the setup necessary so that the caller only has
3933 to emit a single comparison insn. This setup can involve doing a BLKmode
3934 comparison or emitting a library call to perform the comparison if no insn
3935 is available to handle it.
3936 The values which are passed in through pointers can be modified; the caller
3937 should perform the comparison on the modified values. Constant
3938 comparisons must have already been folded. */
3941 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3942 int unsignedp
, enum optab_methods methods
,
3943 rtx
*ptest
, enum machine_mode
*pmode
)
3945 enum machine_mode mode
= *pmode
;
3947 enum machine_mode cmp_mode
;
3948 enum mode_class mclass
;
3950 /* The other methods are not needed. */
3951 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
3952 || methods
== OPTAB_LIB_WIDEN
);
3954 /* If we are optimizing, force expensive constants into a register. */
3955 if (CONSTANT_P (x
) && optimize
3956 && (rtx_cost (x
, COMPARE
, 0, optimize_insn_for_speed_p ())
3957 > COSTS_N_INSNS (1)))
3958 x
= force_reg (mode
, x
);
3960 if (CONSTANT_P (y
) && optimize
3961 && (rtx_cost (y
, COMPARE
, 1, optimize_insn_for_speed_p ())
3962 > COSTS_N_INSNS (1)))
3963 y
= force_reg (mode
, y
);
3966 /* Make sure if we have a canonical comparison. The RTL
3967 documentation states that canonical comparisons are required only
3968 for targets which have cc0. */
3969 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3972 /* Don't let both operands fail to indicate the mode. */
3973 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3974 x
= force_reg (mode
, x
);
3975 if (mode
== VOIDmode
)
3976 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
3978 /* Handle all BLKmode compares. */
3980 if (mode
== BLKmode
)
3982 enum machine_mode result_mode
;
3983 enum insn_code cmp_code
;
3988 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3992 /* Try to use a memory block compare insn - either cmpstr
3993 or cmpmem will do. */
3994 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3995 cmp_mode
!= VOIDmode
;
3996 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3998 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
3999 if (cmp_code
== CODE_FOR_nothing
)
4000 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
4001 if (cmp_code
== CODE_FOR_nothing
)
4002 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
4003 if (cmp_code
== CODE_FOR_nothing
)
4006 /* Must make sure the size fits the insn's mode. */
4007 if ((CONST_INT_P (size
)
4008 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
4009 || (GET_MODE_BITSIZE (GET_MODE (size
))
4010 > GET_MODE_BITSIZE (cmp_mode
)))
4013 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
4014 result
= gen_reg_rtx (result_mode
);
4015 size
= convert_to_mode (cmp_mode
, size
, 1);
4016 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
4018 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4019 *pmode
= result_mode
;
4023 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
4026 /* Otherwise call a library function, memcmp. */
4027 libfunc
= memcmp_libfunc
;
4028 length_type
= sizetype
;
4029 result_mode
= TYPE_MODE (integer_type_node
);
4030 cmp_mode
= TYPE_MODE (length_type
);
4031 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
4032 TYPE_UNSIGNED (length_type
));
4034 result
= emit_library_call_value (libfunc
, 0, LCT_PURE
,
4040 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4041 *pmode
= result_mode
;
4045 /* Don't allow operands to the compare to trap, as that can put the
4046 compare and branch in different basic blocks. */
4047 if (cfun
->can_throw_non_call_exceptions
)
4050 x
= force_reg (mode
, x
);
4052 y
= force_reg (mode
, y
);
4055 if (GET_MODE_CLASS (mode
) == MODE_CC
)
4057 gcc_assert (can_compare_p (comparison
, CCmode
, ccp_jump
));
4058 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4062 mclass
= GET_MODE_CLASS (mode
);
4063 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4067 enum insn_code icode
;
4068 icode
= optab_handler (cbranch_optab
, cmp_mode
);
4069 if (icode
!= CODE_FOR_nothing
4070 && insn_operand_matches (icode
, 0, test
))
4072 rtx last
= get_last_insn ();
4073 rtx op0
= prepare_operand (icode
, x
, 1, mode
, cmp_mode
, unsignedp
);
4074 rtx op1
= prepare_operand (icode
, y
, 2, mode
, cmp_mode
, unsignedp
);
4076 && insn_operand_matches (icode
, 1, op0
)
4077 && insn_operand_matches (icode
, 2, op1
))
4079 XEXP (test
, 0) = op0
;
4080 XEXP (test
, 1) = op1
;
4085 delete_insns_since (last
);
4088 if (methods
== OPTAB_DIRECT
|| !CLASS_HAS_WIDER_MODES_P (mclass
))
4090 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
);
4092 while (cmp_mode
!= VOIDmode
);
4094 if (methods
!= OPTAB_LIB_WIDEN
)
4097 if (!SCALAR_FLOAT_MODE_P (mode
))
4101 /* Handle a libcall just for the mode we are using. */
4102 libfunc
= optab_libfunc (cmp_optab
, mode
);
4103 gcc_assert (libfunc
);
4105 /* If we want unsigned, and this mode has a distinct unsigned
4106 comparison routine, use that. */
4109 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
4114 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4115 targetm
.libgcc_cmp_return_mode (),
4116 2, x
, mode
, y
, mode
);
4118 /* There are two kinds of comparison routines. Biased routines
4119 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4120 of gcc expect that the comparison operation is equivalent
4121 to the modified comparison. For signed comparisons compare the
4122 result against 1 in the biased case, and zero in the unbiased
4123 case. For unsigned comparisons always compare against 1 after
4124 biasing the unbiased result by adding 1. This gives us a way to
4126 The comparisons in the fixed-point helper library are always
4131 if (!TARGET_LIB_INT_CMP_BIASED
&& !ALL_FIXED_POINT_MODE_P (mode
))
4134 x
= plus_constant (result
, 1);
4140 prepare_cmp_insn (x
, y
, comparison
, NULL_RTX
, unsignedp
, methods
,
4144 prepare_float_lib_cmp (x
, y
, comparison
, ptest
, pmode
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      if (reload_completed)
	return NULL_RTX;
      x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
{
  enum machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label);
}
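/* Illustrative sketch, not part of GCC: the operand/condition swap that
   emit_cmp_and_jump_insns performs to keep the RTL canonical.  Swapping
   the operands of a comparison requires replacing the code by its
   swapped counterpart (LT<->GT, LE<->GE; EQ and NE are symmetric).  The
   enum and functions below are hypothetical stand-ins for rtx codes.  */

enum demo_cmp_code { DEMO_EQ, DEMO_NE, DEMO_LT, DEMO_LE, DEMO_GT, DEMO_GE };

static enum demo_cmp_code
demo_swap_condition (enum demo_cmp_code code)
{
  switch (code)
    {
    case DEMO_LT: return DEMO_GT;
    case DEMO_GT: return DEMO_LT;
    case DEMO_LE: return DEMO_GE;
    case DEMO_GE: return DEMO_LE;
    default:      return code;	/* EQ and NE are unchanged.  */
    }
}

/* "5 < x" and "x > 5" test the same thing, so a constant first operand
   can always be moved into the second position this way.  */
static int
demo_canonical_less (long x)
{
  return x > 5;		/* Canonical form of the comparison 5 < x.  */
}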
4240 /* Emit a library call comparison between floating point X and Y.
4241 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4244 prepare_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
,
4245 rtx
*ptest
, enum machine_mode
*pmode
)
4247 enum rtx_code swapped
= swap_condition (comparison
);
4248 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4249 enum machine_mode orig_mode
= GET_MODE (x
);
4250 enum machine_mode mode
, cmp_mode
;
4251 rtx true_rtx
, false_rtx
;
4252 rtx value
, target
, insns
, equiv
;
4254 bool reversed_p
= false;
4255 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4257 for (mode
= orig_mode
;
4259 mode
= GET_MODE_WIDER_MODE (mode
))
4261 if (code_to_optab
[comparison
]
4262 && (libfunc
= optab_libfunc (code_to_optab
[comparison
], mode
)))
4265 if (code_to_optab
[swapped
]
4266 && (libfunc
= optab_libfunc (code_to_optab
[swapped
], mode
)))
4269 tmp
= x
; x
= y
; y
= tmp
;
4270 comparison
= swapped
;
4274 if (code_to_optab
[reversed
]
4275 && (libfunc
= optab_libfunc (code_to_optab
[reversed
], mode
)))
4277 comparison
= reversed
;
4283 gcc_assert (mode
!= VOIDmode
);
4285 if (mode
!= orig_mode
)
4287 x
= convert_to_mode (mode
, x
, 0);
4288 y
= convert_to_mode (mode
, y
, 0);
  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
4294 if (comparison
== UNORDERED
4295 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4297 true_rtx
= const_true_rtx
;
4298 false_rtx
= const0_rtx
;
4305 true_rtx
= const0_rtx
;
4306 false_rtx
= const_true_rtx
;
4310 true_rtx
= const_true_rtx
;
4311 false_rtx
= const0_rtx
;
4315 true_rtx
= const1_rtx
;
4316 false_rtx
= const0_rtx
;
4320 true_rtx
= const0_rtx
;
4321 false_rtx
= constm1_rtx
;
4325 true_rtx
= constm1_rtx
;
4326 false_rtx
= const0_rtx
;
4330 true_rtx
= const0_rtx
;
4331 false_rtx
= const1_rtx
;
4339 if (comparison
== UNORDERED
)
4341 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4342 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4343 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4344 temp
, const_true_rtx
, equiv
);
4348 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4349 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4350 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4351 equiv
, true_rtx
, false_rtx
);
4355 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4356 cmp_mode
, 2, x
, mode
, y
, mode
);
4357 insns
= get_insns ();
4360 target
= gen_reg_rtx (cmp_mode
);
4361 emit_libcall_block (insns
, target
, value
, equiv
);
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
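/* Illustrative sketch, not part of GCC: the calling convention assumed
   above for soft-float comparison helpers.  A __ltdf2-style routine
   returns an integer that is <0, 0 or >0, and the expanded code simply
   re-applies the original comparison code against zero to obtain the
   boolean result (the gen_rtx_fmt_ee (comparison, ..., const0_rtx) case).
   demo_cmpdf2 is a hypothetical stand-in written with plain doubles.  */

static int
demo_cmpdf2 (double a, double b)
{
  if (a < b)
    return -1;
  if (a > b)
    return 1;
  return 0;		/* Equal (unordered handling omitted here).  */
}

static int
demo_expand_lt (double a, double b)
{
  /* What the expanded RTL computes: call the helper, then test its
     integer result with the original comparison against 0.  */
  return demo_cmpdf2 (a, b) < 0;
}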
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  struct expand_operand ops[1];

  create_address_operand (&ops[0], loc);
  expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
  emit_barrier ();
}
4385 #ifdef HAVE_conditional_move
4387 /* Emit a conditional move instruction if the machine supports one for that
4388 condition and machine mode.
4390 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4391 the mode to use should they be constants. If it is VOIDmode, they cannot
4394 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4395 should be stored there. MODE is the mode to use should they be constants.
4396 If it is VOIDmode, they cannot both be constants.
4398 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4399 is not supported. */
4402 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4403 enum machine_mode cmode
, rtx op2
, rtx op3
,
4404 enum machine_mode mode
, int unsignedp
)
4406 rtx tem
, comparison
, last
;
4407 enum insn_code icode
;
4408 enum rtx_code reversed
;
4410 /* If one operand is constant, make it the second one. Only do this
4411 if the other operand is not constant as well. */
4413 if (swap_commutative_operands_p (op0
, op1
))
4418 code
= swap_condition (code
);
4421 /* get_condition will prefer to generate LT and GT even if the old
4422 comparison was against zero, so undo that canonicalization here since
4423 comparisons against zero are cheaper. */
4424 if (code
== LT
&& op1
== const1_rtx
)
4425 code
= LE
, op1
= const0_rtx
;
4426 else if (code
== GT
&& op1
== constm1_rtx
)
4427 code
= GE
, op1
= const0_rtx
;
4429 if (cmode
== VOIDmode
)
4430 cmode
= GET_MODE (op0
);
4432 if (swap_commutative_operands_p (op2
, op3
)
4433 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4442 if (mode
== VOIDmode
)
4443 mode
= GET_MODE (op2
);
4445 icode
= direct_optab_handler (movcc_optab
, mode
);
4447 if (icode
== CODE_FOR_nothing
)
4451 target
= gen_reg_rtx (mode
);
4453 code
= unsignedp
? unsigned_condition (code
) : code
;
4454 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4456 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4457 return NULL and let the caller figure out how best to deal with this
4459 if (!COMPARISON_P (comparison
))
4462 do_pending_stack_adjust ();
4463 last
= get_last_insn ();
4464 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4465 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4466 &comparison
, &cmode
);
4469 struct expand_operand ops
[4];
4471 create_output_operand (&ops
[0], target
, mode
);
4472 create_fixed_operand (&ops
[1], comparison
);
4473 create_input_operand (&ops
[2], op2
, mode
);
4474 create_input_operand (&ops
[3], op3
, mode
);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
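/* Illustrative sketch, not part of GCC: the value the conditional-move
   expansion above computes -- TARGET receives OP2 when the comparison of
   OP0 and OP1 holds, OP3 otherwise.  The branchless variant shows the
   kind of select a movcc pattern implements; both function names are
   hypothetical.  */

static long
demo_conditional_move (long op0, long op1, long op2, long op3)
{
  return (op0 < op1) ? op2 : op3;
}

static long
demo_conditional_move_branchless (long op0, long op1, long op2, long op3)
{
  long mask = -(long) (op0 < op1);	/* All ones if true, zero if false.  */
  return (op2 & mask) | (op3 & ~mask);
}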
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
4505 /* Emit a conditional addition instruction if the machine supports one for that
4506 condition and machine mode.
4508 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4509 the mode to use should they be constants. If it is VOIDmode, they cannot
4512 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4513 should be stored there. MODE is the mode to use should they be constants.
4514 If it is VOIDmode, they cannot both be constants.
4516 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4517 is not supported. */
4520 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4521 enum machine_mode cmode
, rtx op2
, rtx op3
,
4522 enum machine_mode mode
, int unsignedp
)
4524 rtx tem
, comparison
, last
;
4525 enum insn_code icode
;
4526 enum rtx_code reversed
;
4528 /* If one operand is constant, make it the second one. Only do this
4529 if the other operand is not constant as well. */
4531 if (swap_commutative_operands_p (op0
, op1
))
4536 code
= swap_condition (code
);
4539 /* get_condition will prefer to generate LT and GT even if the old
4540 comparison was against zero, so undo that canonicalization here since
4541 comparisons against zero are cheaper. */
4542 if (code
== LT
&& op1
== const1_rtx
)
4543 code
= LE
, op1
= const0_rtx
;
4544 else if (code
== GT
&& op1
== constm1_rtx
)
4545 code
= GE
, op1
= const0_rtx
;
4547 if (cmode
== VOIDmode
)
4548 cmode
= GET_MODE (op0
);
4550 if (swap_commutative_operands_p (op2
, op3
)
4551 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4560 if (mode
== VOIDmode
)
4561 mode
= GET_MODE (op2
);
4563 icode
= optab_handler (addcc_optab
, mode
);
4565 if (icode
== CODE_FOR_nothing
)
4569 target
= gen_reg_rtx (mode
);
4571 code
= unsignedp
? unsigned_condition (code
) : code
;
4572 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4574 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4575 return NULL and let the caller figure out how best to deal with this
4577 if (!COMPARISON_P (comparison
))
4580 do_pending_stack_adjust ();
4581 last
= get_last_insn ();
4582 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4583 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4584 &comparison
, &cmode
);
4587 struct expand_operand ops
[4];
4589 create_output_operand (&ops
[0], target
, mode
);
4590 create_fixed_operand (&ops
[1], comparison
);
4591 create_input_operand (&ops
[2], op2
, mode
);
4592 create_input_operand (&ops
[3], op3
, mode
);
4593 if (maybe_expand_insn (icode
, 4, ops
))
4595 if (ops
[0].value
!= target
)
4596 convert_move (target
, ops
[0].value
, false);
4600 delete_insns_since (last
);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode);
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
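/* Illustrative sketch, not part of GCC: the difference between the
   zero-extension (zext_optab) and sign-extension (sext_optab) selected
   by can_extend_p, shown for an 8-bit value widened to 32 bits.  */

#include <stdint.h>

static uint32_t
demo_zero_extend_qi_si (uint8_t x)
{
  return (uint32_t) x;			/* 0xff -> 0x000000ff.  */
}

static int32_t
demo_sign_extend_qi_si (int8_t x)
{
  return (int32_t) x;			/* 0xff (i.e. -1) -> 0xffffffff (-1).  */
}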
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode);
}
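/* Illustrative sketch, not part of GCC: why an explicit FTRUNC may be
   needed before FIX, as *TRUNCP_PTR indicates above.  If the machine's
   FIX insn is assumed to round (to nearest, say) rather than truncate,
   rounding the operand toward zero first reproduces C's truncating
   (long) conversion.  Plain libm trunc stands in for FTRUNC here.  */

#include <math.h>

static long
demo_fix_after_ftrunc (double x)
{
  double truncated = trunc (x);		/* FTRUNC: round toward zero.  */
  return (long) truncated;		/* FIX on an already-integral value.  */
}
/* demo_fix_after_ftrunc (-2.7) == -2, matching (long) -2.7.  */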
4804 /* Generate code to convert FROM to floating point
4805 and store in TO. FROM must be fixed point and not VOIDmode.
4806 UNSIGNEDP nonzero means regard FROM as unsigned.
4807 Normally this is done by correcting the final value
4808 if it is negative. */
4811 expand_float (rtx to
, rtx from
, int unsignedp
)
4813 enum insn_code icode
;
4815 enum machine_mode fmode
, imode
;
4816 bool can_do_signed
= false;
4818 /* Crash now, because we won't be able to decide which mode to use. */
4819 gcc_assert (GET_MODE (from
) != VOIDmode
);
4821 /* Look for an insn to do the conversion. Do it in the specified
4822 modes if possible; otherwise convert either input, output or both to
4823 wider mode. If the integer mode is wider than the mode of FROM,
4824 we can do the conversion signed even if the input is unsigned. */
4826 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4827 fmode
= GET_MODE_WIDER_MODE (fmode
))
4828 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4829 imode
= GET_MODE_WIDER_MODE (imode
))
4831 int doing_unsigned
= unsignedp
;
4833 if (fmode
!= GET_MODE (to
)
4834 && significand_size (fmode
) < GET_MODE_PRECISION (GET_MODE (from
)))
4837 icode
= can_float_p (fmode
, imode
, unsignedp
);
4838 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4840 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4841 if (scode
!= CODE_FOR_nothing
)
4842 can_do_signed
= true;
4843 if (imode
!= GET_MODE (from
))
4844 icode
= scode
, doing_unsigned
= 0;
4847 if (icode
!= CODE_FOR_nothing
)
4849 if (imode
!= GET_MODE (from
))
4850 from
= convert_to_mode (imode
, from
, unsignedp
);
4852 if (fmode
!= GET_MODE (to
))
4853 target
= gen_reg_rtx (fmode
);
4855 emit_unop_insn (icode
, target
, from
,
4856 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4859 convert_move (to
, target
, 0);
4864 /* Unsigned integer, and no way to convert directly. Convert as signed,
4865 then unconditionally adjust the result. */
4866 if (unsignedp
&& can_do_signed
)
4868 rtx label
= gen_label_rtx ();
4870 REAL_VALUE_TYPE offset
;
4872 /* Look for a usable floating mode FMODE wider than the source and at
4873 least as wide as the target. Using FMODE will avoid rounding woes
4874 with unsigned values greater than the signed maximum value. */
4876 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4877 fmode
= GET_MODE_WIDER_MODE (fmode
))
4878 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4879 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4882 if (fmode
== VOIDmode
)
4884 /* There is no such mode. Pretend the target is wide enough. */
4885 fmode
= GET_MODE (to
);
4887 /* Avoid double-rounding when TO is narrower than FROM. */
4888 if ((significand_size (fmode
) + 1)
4889 < GET_MODE_PRECISION (GET_MODE (from
)))
4892 rtx neglabel
= gen_label_rtx ();
4894 /* Don't use TARGET if it isn't a register, is a hard register,
4895 or is the wrong mode. */
4897 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4898 || GET_MODE (target
) != fmode
)
4899 target
= gen_reg_rtx (fmode
);
4901 imode
= GET_MODE (from
);
4902 do_pending_stack_adjust ();
4904 /* Test whether the sign bit is set. */
4905 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4908 /* The sign bit is not set. Convert as signed. */
4909 expand_float (target
, from
, 0);
4910 emit_jump_insn (gen_jump (label
));
4913 /* The sign bit is set.
4914 Convert to a usable (positive signed) value by shifting right
4915 one bit, while remembering if a nonzero bit was shifted
4916 out; i.e., compute (from & 1) | (from >> 1). */
4918 emit_label (neglabel
);
4919 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4920 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4921 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
4922 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4924 expand_float (target
, temp
, 0);
4926 /* Multiply by 2 to undo the shift above. */
4927 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4928 target
, 0, OPTAB_LIB_WIDEN
);
4930 emit_move_insn (target
, temp
);
4932 do_pending_stack_adjust ();
4938 /* If we are about to do some arithmetic to correct for an
4939 unsigned operand, do it in a pseudo-register. */
4941 if (GET_MODE (to
) != fmode
4942 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4943 target
= gen_reg_rtx (fmode
);
4945 /* Convert as signed integer to floating. */
4946 expand_float (target
, from
, 0);
4948 /* If FROM is negative (and therefore TO is negative),
4949 correct its value by 2**bitwidth. */
4951 do_pending_stack_adjust ();
4952 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4956 real_2expN (&offset
, GET_MODE_PRECISION (GET_MODE (from
)), fmode
);
4957 temp
= expand_binop (fmode
, add_optab
, target
,
4958 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4959 target
, 0, OPTAB_LIB_WIDEN
);
4961 emit_move_insn (target
, temp
);
4963 do_pending_stack_adjust ();
4968 /* No hardware instruction available; call a library routine. */
4973 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4975 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4976 from
= convert_to_mode (SImode
, from
, unsignedp
);
4978 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
4979 gcc_assert (libfunc
);
4983 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4984 GET_MODE (to
), 1, from
,
4986 insns
= get_insns ();
4989 emit_libcall_block (insns
, target
, value
,
4990 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
4991 GET_MODE (to
), from
));
  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
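/* Illustrative sketch, not part of GCC: the halve-and-double trick used
   by expand_float above when only a signed integer->float conversion is
   available.  When the sign bit of FROM is set, (from & 1) | (from >> 1)
   is a positive value whose float image, once doubled, rounds to the
   same result a direct unsigned conversion would produce.  */

#include <stdint.h>

static double
demo_unsigned_to_double (uint64_t from)
{
  if ((int64_t) from >= 0)
    /* Sign bit clear: the signed conversion is already correct.  */
    return (double) (int64_t) from;

  /* Sign bit set: shift right one, folding the lost low bit back in so
     rounding still sees it, convert as signed, then multiply by 2.  */
  uint64_t halved = (from >> 1) | (from & 1);
  double d = (double) (int64_t) halved;
  return d + d;
}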
5008 /* Generate code to convert FROM to fixed point and store in TO. FROM
5009 must be floating point. */
5012 expand_fix (rtx to
, rtx from
, int unsignedp
)
5014 enum insn_code icode
;
5016 enum machine_mode fmode
, imode
;
5019 /* We first try to find a pair of modes, one real and one integer, at
5020 least as wide as FROM and TO, respectively, in which we can open-code
5021 this conversion. If the integer mode is wider than the mode of TO,
5022 we can do the conversion either signed or unsigned. */
5024 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5025 fmode
= GET_MODE_WIDER_MODE (fmode
))
5026 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5027 imode
= GET_MODE_WIDER_MODE (imode
))
5029 int doing_unsigned
= unsignedp
;
5031 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5032 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5033 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5035 if (icode
!= CODE_FOR_nothing
)
5037 rtx last
= get_last_insn ();
5038 if (fmode
!= GET_MODE (from
))
5039 from
= convert_to_mode (fmode
, from
, 0);
5043 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5044 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5048 if (imode
!= GET_MODE (to
))
5049 target
= gen_reg_rtx (imode
);
5051 if (maybe_emit_unop_insn (icode
, target
, from
,
5052 doing_unsigned
? UNSIGNED_FIX
: FIX
))
5055 convert_move (to
, target
, unsignedp
);
5058 delete_insns_since (last
);
5062 /* For an unsigned conversion, there is one more way to do it.
5063 If we have a signed conversion, we generate code that compares
   the real value to the largest representable positive number.  If it
   is smaller, the conversion is done normally.  Otherwise, subtract
5066 one plus the highest signed number, convert, and add it back.
5068 We only need to check all real modes, since we know we didn't find
5069 anything with a wider integer mode.
5071 This code used to extend FP value into mode wider than the destination.
5072 This is needed for decimal float modes which cannot accurately
5073 represent one plus the highest signed number of the same size, but
5074 not for binary modes. Consider, for instance conversion from SFmode
5077 The hot path through the code is dealing with inputs smaller than 2^63
   and doing just the conversion, so there are no bits to lose.

   In the other path we know the value is positive in the range 2^63..2^64-1
   inclusive (for other inputs, overflow happens and the result is undefined).
5082 So we know that the most important bit set in mantissa corresponds to
5083 2^63. The subtraction of 2^63 should not generate any rounding as it
5084 simply clears out that bit. The rest is trivial. */
5086 if (unsignedp
&& GET_MODE_PRECISION (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5087 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5088 fmode
= GET_MODE_WIDER_MODE (fmode
))
5089 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5090 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5091 || GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (GET_MODE (to
))))
5094 REAL_VALUE_TYPE offset
;
5095 rtx limit
, lab1
, lab2
, insn
;
5097 bitsize
= GET_MODE_PRECISION (GET_MODE (to
));
5098 real_2expN (&offset
, bitsize
- 1, fmode
);
5099 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5100 lab1
= gen_label_rtx ();
5101 lab2
= gen_label_rtx ();
5103 if (fmode
!= GET_MODE (from
))
5104 from
= convert_to_mode (fmode
, from
, 0);
5106 /* See if we need to do the subtraction. */
5107 do_pending_stack_adjust ();
5108 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5111 /* If not, do the signed "fix" and branch around fixup code. */
5112 expand_fix (to
, from
, 0);
5113 emit_jump_insn (gen_jump (lab2
));
5116 /* Otherwise, subtract 2**(N-1), convert to signed number,
5117 then add 2**(N-1). Do the addition using XOR since this
5118 will often generate better code. */
5120 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5121 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5122 expand_fix (to
, target
, 0);
5123 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5125 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5127 to
, 1, OPTAB_LIB_WIDEN
);
5130 emit_move_insn (to
, target
);
5134 if (optab_handler (mov_optab
, GET_MODE (to
)) != CODE_FOR_nothing
)
5136 /* Make a place for a REG_NOTE and add it. */
5137 insn
= emit_move_insn (to
, to
);
5138 set_unique_reg_note (insn
,
5140 gen_rtx_fmt_e (UNSIGNED_FIX
,
5148 /* We can't do it with an insn, so use a library call. But first ensure
5149 that the mode of TO is at least as wide as SImode, since those are the
5150 only library calls we know about. */
5152 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5154 target
= gen_reg_rtx (SImode
);
5156 expand_fix (target
, from
, unsignedp
);
5164 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5165 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5166 gcc_assert (libfunc
);
5170 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5171 GET_MODE (to
), 1, from
,
5173 insns
= get_insns ();
5176 emit_libcall_block (insns
, target
, value
,
5177 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5178 GET_MODE (to
), from
));
  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
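/* Illustrative sketch, not part of GCC: the unsigned-fix strategy used
   by expand_fix above when only a signed float->integer conversion
   exists.  Inputs below 2^63 convert directly; inputs in [2^63, 2^64)
   have 2^63 subtracted first (exact, it only clears the top bit of the
   significand), are converted as signed, and then have bit 63 restored
   with an XOR.  Out-of-range inputs are undefined, as in the comment
   above.  */

#include <stdint.h>

static uint64_t
demo_double_to_unsigned (double from)
{
  const double limit = 9223372036854775808.0;	/* 2^63 as a double.  */

  if (from < limit)
    return (uint64_t) (int64_t) from;		/* The signed fix suffices.  */

  int64_t fixed = (int64_t) (from - limit);	/* Now in [0, 2^63).  */
  return (uint64_t) fixed ^ ((uint64_t) 1 << 63);
}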
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  enum machine_mode to_mode = GET_MODE (to);
  enum machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx insns, value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (tab->code, to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (optab_handler (code_to_optab[(int) code], mode)
	      != CODE_FOR_nothing));
}
/* Set all insn_code fields to CODE_FOR_nothing.  */

static void
init_insn_codes (void)
{
  memset (optab_table, 0, sizeof (optab_table));
  memset (convert_optab_table, 0, sizeof (convert_optab_table));
  memset (direct_optab_table, 0, sizeof (direct_optab_table));
}

/* Initialize OP's code to CODE, and write it into the code_to_optab table.  */

static void
init_optab (optab op, enum rtx_code code)
{
  op->code = code;
  code_to_optab[(int) code] = op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static void
init_optabv (optab op, enum rtx_code code)
{
  op->code = code;
}

/* Conversion optabs never go in the code_to_optab table.  */

static void
init_convert_optab (convert_optab op, enum rtx_code code)
{
  op->code = code;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix,
	     enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
  int len = prefix_len + opname_len + mname_len + 1 + 1;
  char *libfunc_name = XALLOCAVEC (char, len);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  if (targetm.libfunc_gnu_prefix)
    {
      *p++ = 'g';
      *p++ = 'n';
      *p++ = 'u';
      *p++ = '_';
    }
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;
  *p = '\0';

  set_optab_libfunc (optable, mode,
		     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
5379 gen_int_libfunc (optab optable
, const char *opname
, char suffix
,
5380 enum machine_mode mode
)
5382 int maxsize
= 2 * BITS_PER_WORD
;
5384 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5386 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5387 maxsize
= LONG_LONG_TYPE_SIZE
;
5388 if (GET_MODE_CLASS (mode
) != MODE_INT
5389 || mode
< word_mode
|| GET_MODE_BITSIZE (mode
) > maxsize
)
5391 gen_libfunc (optable
, opname
, suffix
, mode
);
5394 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5397 gen_fp_libfunc (optab optable
, const char *opname
, char suffix
,
5398 enum machine_mode mode
)
5402 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5403 gen_libfunc (optable
, opname
, suffix
, mode
);
5404 if (DECIMAL_FLOAT_MODE_P (mode
))
5406 dec_opname
= XALLOCAVEC (char, sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5407 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5408 depending on the low level floating format used. */
5409 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5410 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5411 gen_libfunc (optable
, dec_opname
, suffix
, mode
);
5415 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5418 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5419 enum machine_mode mode
)
5421 if (!ALL_FIXED_POINT_MODE_P (mode
))
5423 gen_libfunc (optable
, opname
, suffix
, mode
);
5426 /* Like gen_libfunc, but verify that signed fixed-point operation is
5430 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5431 enum machine_mode mode
)
5433 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5435 gen_libfunc (optable
, opname
, suffix
, mode
);
5438 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5442 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5443 enum machine_mode mode
)
5445 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5447 gen_libfunc (optable
, opname
, suffix
, mode
);
5450 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5453 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5454 enum machine_mode mode
)
5456 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5457 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5458 if (INTEGRAL_MODE_P (mode
))
5459 gen_int_libfunc (optable
, name
, suffix
, mode
);
5462 /* Like gen_libfunc, but verify that FP or INT operation is involved
5463 and add 'v' suffix for integer operation. */
5466 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5467 enum machine_mode mode
)
5469 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5470 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5471 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5473 int len
= strlen (name
);
5474 char *v_name
= XALLOCAVEC (char, len
+ 2);
5475 strcpy (v_name
, name
);
5477 v_name
[len
+ 1] = 0;
5478 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
5482 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5486 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5487 enum machine_mode mode
)
5489 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5490 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5491 if (INTEGRAL_MODE_P (mode
))
5492 gen_int_libfunc (optable
, name
, suffix
, mode
);
5493 if (ALL_FIXED_POINT_MODE_P (mode
))
5494 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5497 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5501 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5502 enum machine_mode mode
)
5504 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5505 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5506 if (INTEGRAL_MODE_P (mode
))
5507 gen_int_libfunc (optable
, name
, suffix
, mode
);
5508 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5509 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5512 /* Like gen_libfunc, but verify that INT or FIXED operation is
5516 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5517 enum machine_mode mode
)
5519 if (INTEGRAL_MODE_P (mode
))
5520 gen_int_libfunc (optable
, name
, suffix
, mode
);
5521 if (ALL_FIXED_POINT_MODE_P (mode
))
5522 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
5525 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5529 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5530 enum machine_mode mode
)
5532 if (INTEGRAL_MODE_P (mode
))
5533 gen_int_libfunc (optable
, name
, suffix
, mode
);
5534 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5535 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
5538 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5542 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5543 enum machine_mode mode
)
5545 if (INTEGRAL_MODE_P (mode
))
5546 gen_int_libfunc (optable
, name
, suffix
, mode
);
5547 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5548 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5551 /* Initialize the libfunc fields of an entire group of entries of an
5552 inter-mode-class conversion optab. The string formation rules are
5553 similar to the ones for init_libfuncs, above, but instead of having
5554 a mode name and an operand count these functions have two mode names
5555 and no operand count. */
5558 gen_interclass_conv_libfunc (convert_optab tab
,
5560 enum machine_mode tmode
,
5561 enum machine_mode fmode
)
5563 size_t opname_len
= strlen (opname
);
5564 size_t mname_len
= 0;
5566 const char *fname
, *tname
;
5568 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5569 char *libfunc_name
, *suffix
;
5570 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5573 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5574 depends on which underlying decimal floating point format is used. */
5575 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5577 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5579 nondec_name
= XALLOCAVEC (char, prefix_len
+ opname_len
+ mname_len
+ 1 + 1);
5580 nondec_name
[0] = '_';
5581 nondec_name
[1] = '_';
5582 if (targetm
.libfunc_gnu_prefix
)
5584 nondec_name
[2] = 'g';
5585 nondec_name
[3] = 'n';
5586 nondec_name
[4] = 'u';
5587 nondec_name
[5] = '_';
5590 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5591 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5593 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5596 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5597 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5598 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5600 fname
= GET_MODE_NAME (fmode
);
5601 tname
= GET_MODE_NAME (tmode
);
5603 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5605 libfunc_name
= dec_name
;
5606 suffix
= dec_suffix
;
5610 libfunc_name
= nondec_name
;
5611 suffix
= nondec_suffix
;
5615 for (q
= fname
; *q
; p
++, q
++)
5617 for (q
= tname
; *q
; p
++, q
++)
5622 set_conv_libfunc (tab
, tmode
, fmode
,
5623 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
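/* Illustrative sketch, not part of GCC: the naming scheme used above for
   inter-class conversion libfuncs -- a prefix, the operation name, then
   the lowercased source and destination mode names with no operand-count
   suffix, e.g. "float" from SImode to DFmode gives "__floatsidf" and
   "fix" from DFmode to SImode gives "__fixdfsi".  Plain snprintf stands
   in for the alloca/ggc string building done above; the function name is
   hypothetical.  */

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void
demo_conv_libfunc_name (char *buf, size_t buflen, const char *opname,
			const char *from_mode, const char *to_mode)
{
  size_t n;

  snprintf (buf, buflen, "__%s", opname);
  n = strlen (buf);
  for (; *from_mode != '\0' && n + 1 < buflen; from_mode++)
    buf[n++] = (char) tolower ((unsigned char) *from_mode);
  for (; *to_mode != '\0' && n + 1 < buflen; to_mode++)
    buf[n++] = (char) tolower ((unsigned char) *to_mode);
  buf[n] = '\0';
}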
5626 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5627 int->fp conversion. */
5630 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5632 enum machine_mode tmode
,
5633 enum machine_mode fmode
)
5635 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5637 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5639 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* ufloat_optab is special: it uses "floatun" for binary FP names and
   "floatuns" for decimal FP names.  */
5646 gen_ufloat_conv_libfunc (convert_optab tab
,
5647 const char *opname ATTRIBUTE_UNUSED
,
5648 enum machine_mode tmode
,
5649 enum machine_mode fmode
)
5651 if (DECIMAL_FLOAT_MODE_P (tmode
))
5652 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5654 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
5657 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5658 fp->int conversion. */
5661 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5663 enum machine_mode tmode
,
5664 enum machine_mode fmode
)
5666 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5668 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5670 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5673 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5674 fp->int conversion with no decimal floating point involved. */
5677 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5679 enum machine_mode tmode
,
5680 enum machine_mode fmode
)
5682 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5684 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5686 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are similar to the ones for init_libfunc,
   above.  */
5694 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5695 enum machine_mode tmode
, enum machine_mode fmode
)
5697 size_t opname_len
= strlen (opname
);
5698 size_t mname_len
= 0;
5700 const char *fname
, *tname
;
5702 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5703 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5704 char *libfunc_name
, *suffix
;
5707 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5708 depends on which underlying decimal floating point format is used. */
5709 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5711 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5713 nondec_name
= XALLOCAVEC (char, 2 + opname_len
+ mname_len
+ 1 + 1);
5714 nondec_name
[0] = '_';
5715 nondec_name
[1] = '_';
5716 if (targetm
.libfunc_gnu_prefix
)
5718 nondec_name
[2] = 'g';
5719 nondec_name
[3] = 'n';
5720 nondec_name
[4] = 'u';
5721 nondec_name
[5] = '_';
5723 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5724 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5726 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5729 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5730 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5731 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5733 fname
= GET_MODE_NAME (fmode
);
5734 tname
= GET_MODE_NAME (tmode
);
5736 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5738 libfunc_name
= dec_name
;
5739 suffix
= dec_suffix
;
5743 libfunc_name
= nondec_name
;
5744 suffix
= nondec_suffix
;
5748 for (q
= fname
; *q
; p
++, q
++)
5750 for (q
= tname
; *q
; p
++, q
++)
5756 set_conv_libfunc (tab
, tmode
, fmode
,
5757 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
/* Pick the proper libcall for trunc_optab.  We need to choose whether we do
   truncation or extension and interclass or intraclass.  */
5764 gen_trunc_conv_libfunc (convert_optab tab
,
5766 enum machine_mode tmode
,
5767 enum machine_mode fmode
)
5769 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5771 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5776 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5777 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5778 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5780 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
5783 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5784 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5785 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5786 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for extend_optab.  We need to choose whether we do
   truncation or extension and interclass or intraclass.  */
5793 gen_extend_conv_libfunc (convert_optab tab
,
5794 const char *opname ATTRIBUTE_UNUSED
,
5795 enum machine_mode tmode
,
5796 enum machine_mode fmode
)
5798 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5800 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5805 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5806 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5807 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5809 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
5812 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5813 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5814 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5815 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for fract_optab.  We need to choose whether we do
   interclass or intraclass.  */
5822 gen_fract_conv_libfunc (convert_optab tab
,
5824 enum machine_mode tmode
,
5825 enum machine_mode fmode
)
5829 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
5832 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5833 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5835 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5838 /* Pick proper libcall for fractuns_optab. */
5841 gen_fractuns_conv_libfunc (convert_optab tab
,
5843 enum machine_mode tmode
,
5844 enum machine_mode fmode
)
5848 /* One mode must be a fixed-point mode, and the other must be an integer
5850 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
5851 || (ALL_FIXED_POINT_MODE_P (fmode
)
5852 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
5855 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for satfract_optab.  We need to choose whether we
   do interclass or intraclass.  */
5862 gen_satfract_conv_libfunc (convert_optab tab
,
5864 enum machine_mode tmode
,
5865 enum machine_mode fmode
)
5869 /* TMODE must be a fixed-point mode. */
5870 if (!ALL_FIXED_POINT_MODE_P (tmode
))
5873 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5874 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5876 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5879 /* Pick proper libcall for satfractuns_optab. */
5882 gen_satfractuns_conv_libfunc (convert_optab tab
,
5884 enum machine_mode tmode
,
5885 enum machine_mode fmode
)
5889 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5890 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
5893 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5896 /* A table of previously-created libfuncs, hashed by name. */
5897 static GTY ((param_is (union tree_node
))) htab_t libfunc_decls
;
5899 /* Hashtable callbacks for libfunc_decls. */
5902 libfunc_decl_hash (const void *entry
)
5904 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree
) entry
));
5908 libfunc_decl_eq (const void *entry1
, const void *entry2
)
5910 return DECL_NAME ((const_tree
) entry1
) == (const_tree
) entry2
;
5913 /* Build a decl for a libfunc named NAME. */
5916 build_libfunc_function (const char *name
)
5918 tree decl
= build_decl (UNKNOWN_LOCATION
, FUNCTION_DECL
,
5919 get_identifier (name
),
5920 build_function_type (integer_type_node
, NULL_TREE
));
5921 /* ??? We don't have any type information except for this is
5922 a function. Pretend this is "int foo()". */
5923 DECL_ARTIFICIAL (decl
) = 1;
5924 DECL_EXTERNAL (decl
) = 1;
5925 TREE_PUBLIC (decl
) = 1;
5926 gcc_assert (DECL_ASSEMBLER_NAME (decl
));
5928 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5929 are the flags assigned by targetm.encode_section_info. */
5930 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl
), 0), NULL
);
5936 init_one_libfunc (const char *name
)
5942 if (libfunc_decls
== NULL
)
5943 libfunc_decls
= htab_create_ggc (37, libfunc_decl_hash
,
5944 libfunc_decl_eq
, NULL
);
5946 /* See if we have already created a libfunc decl for this function. */
5947 id
= get_identifier (name
);
5948 hash
= IDENTIFIER_HASH_VALUE (id
);
5949 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, INSERT
);
5950 decl
= (tree
) *slot
;
5953 /* Create a new decl, so that it can be passed to
5954 targetm.encode_section_info. */
5955 decl
= build_libfunc_function (name
);
5958 return XEXP (DECL_RTL (decl
), 0);
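/* Illustrative sketch, not part of GCC: the memoization pattern used by
   init_one_libfunc above -- look the name up in a table keyed by the
   identifier and only build a new object on a miss, so repeated requests
   return the same decl.  A tiny linear cache of strdup'ed names stands
   in for the GC'ed identifier hash table; all names are hypothetical.  */

#include <stdlib.h>
#include <string.h>

#define DEMO_CACHE_SIZE 64

static const char *demo_cache[DEMO_CACHE_SIZE];
static int demo_cache_used;

static const char *
demo_init_one_name (const char *name)
{
  int i;

  for (i = 0; i < demo_cache_used; i++)
    if (strcmp (demo_cache[i], name) == 0)
      return demo_cache[i];		/* Already created: reuse it.  */

  if (demo_cache_used == DEMO_CACHE_SIZE)
    return NULL;			/* Cache full in this toy version.  */

  demo_cache[demo_cache_used] = strdup (name);
  return demo_cache[demo_cache_used++];
}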
5961 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
5964 set_user_assembler_libfunc (const char *name
, const char *asmspec
)
5970 id
= get_identifier (name
);
5971 hash
= IDENTIFIER_HASH_VALUE (id
);
5972 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, NO_INSERT
);
5974 decl
= (tree
) *slot
;
5975 set_user_assembler_name (decl
, asmspec
);
5976 return XEXP (DECL_RTL (decl
), 0);
5979 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5980 MODE to NAME, which should be either 0 or a string constant. */
5982 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
5985 struct libfunc_entry e
;
5986 struct libfunc_entry
**slot
;
5987 e
.optab
= (size_t) (optable
- &optab_table
[0]);
5992 val
= init_one_libfunc (name
);
5995 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
5997 *slot
= ggc_alloc_libfunc_entry ();
5998 (*slot
)->optab
= (size_t) (optable
- &optab_table
[0]);
5999 (*slot
)->mode1
= mode
;
6000 (*slot
)->mode2
= VOIDmode
;
6001 (*slot
)->libfunc
= val
;
6004 /* Call this to reset the function entry for one conversion optab
6005 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6006 either 0 or a string constant. */
6008 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
6009 enum machine_mode fmode
, const char *name
)
6012 struct libfunc_entry e
;
6013 struct libfunc_entry
**slot
;
6014 e
.optab
= (size_t) (optable
- &convert_optab_table
[0]);
6019 val
= init_one_libfunc (name
);
6022 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
6024 *slot
= ggc_alloc_libfunc_entry ();
6025 (*slot
)->optab
= (size_t) (optable
- &convert_optab_table
[0]);
6026 (*slot
)->mode1
= tmode
;
6027 (*slot
)->mode2
= fmode
;
6028 (*slot
)->libfunc
= val
;
6031 /* Call this to initialize the contents of the optabs
6032 appropriately for the current target machine. */
6039 htab_empty (libfunc_hash
);
6040 /* We statically initialize the insn_codes with the equivalent of
6041 CODE_FOR_nothing. Repeat the process if reinitialising. */
6045 libfunc_hash
= htab_create_ggc (10, hash_libfunc
, eq_libfunc
, NULL
);
6047 init_optab (add_optab
, PLUS
);
6048 init_optabv (addv_optab
, PLUS
);
6049 init_optab (sub_optab
, MINUS
);
6050 init_optabv (subv_optab
, MINUS
);
6051 init_optab (ssadd_optab
, SS_PLUS
);
6052 init_optab (usadd_optab
, US_PLUS
);
6053 init_optab (sssub_optab
, SS_MINUS
);
6054 init_optab (ussub_optab
, US_MINUS
);
6055 init_optab (smul_optab
, MULT
);
6056 init_optab (ssmul_optab
, SS_MULT
);
6057 init_optab (usmul_optab
, US_MULT
);
6058 init_optabv (smulv_optab
, MULT
);
6059 init_optab (smul_highpart_optab
, UNKNOWN
);
6060 init_optab (umul_highpart_optab
, UNKNOWN
);
6061 init_optab (smul_widen_optab
, UNKNOWN
);
6062 init_optab (umul_widen_optab
, UNKNOWN
);
6063 init_optab (usmul_widen_optab
, UNKNOWN
);
6064 init_optab (smadd_widen_optab
, UNKNOWN
);
6065 init_optab (umadd_widen_optab
, UNKNOWN
);
6066 init_optab (ssmadd_widen_optab
, UNKNOWN
);
6067 init_optab (usmadd_widen_optab
, UNKNOWN
);
6068 init_optab (smsub_widen_optab
, UNKNOWN
);
6069 init_optab (umsub_widen_optab
, UNKNOWN
);
6070 init_optab (ssmsub_widen_optab
, UNKNOWN
);
6071 init_optab (usmsub_widen_optab
, UNKNOWN
);
6072 init_optab (sdiv_optab
, DIV
);
6073 init_optab (ssdiv_optab
, SS_DIV
);
6074 init_optab (usdiv_optab
, US_DIV
);
6075 init_optabv (sdivv_optab
, DIV
);
6076 init_optab (sdivmod_optab
, UNKNOWN
);
6077 init_optab (udiv_optab
, UDIV
);
6078 init_optab (udivmod_optab
, UNKNOWN
);
6079 init_optab (smod_optab
, MOD
);
6080 init_optab (umod_optab
, UMOD
);
6081 init_optab (fmod_optab
, UNKNOWN
);
6082 init_optab (remainder_optab
, UNKNOWN
);
6083 init_optab (ftrunc_optab
, UNKNOWN
);
6084 init_optab (and_optab
, AND
);
6085 init_optab (ior_optab
, IOR
);
6086 init_optab (xor_optab
, XOR
);
6087 init_optab (ashl_optab
, ASHIFT
);
6088 init_optab (ssashl_optab
, SS_ASHIFT
);
6089 init_optab (usashl_optab
, US_ASHIFT
);
6090 init_optab (ashr_optab
, ASHIFTRT
);
6091 init_optab (lshr_optab
, LSHIFTRT
);
6092 init_optabv (vashl_optab
, ASHIFT
);
6093 init_optabv (vashr_optab
, ASHIFTRT
);
6094 init_optabv (vlshr_optab
, LSHIFTRT
);
6095 init_optab (rotl_optab
, ROTATE
);
6096 init_optab (rotr_optab
, ROTATERT
);
6097 init_optab (smin_optab
, SMIN
);
6098 init_optab (smax_optab
, SMAX
);
6099 init_optab (umin_optab
, UMIN
);
6100 init_optab (umax_optab
, UMAX
);
6101 init_optab (pow_optab
, UNKNOWN
);
6102 init_optab (atan2_optab
, UNKNOWN
);
6103 init_optab (fma_optab
, FMA
);
6104 init_optab (fms_optab
, UNKNOWN
);
6105 init_optab (fnma_optab
, UNKNOWN
);
6106 init_optab (fnms_optab
, UNKNOWN
);
6108 /* These three have codes assigned exclusively for the sake of
6110 init_optab (mov_optab
, SET
);
6111 init_optab (movstrict_optab
, STRICT_LOW_PART
);
6112 init_optab (cbranch_optab
, COMPARE
);
6114 init_optab (cmov_optab
, UNKNOWN
);
6115 init_optab (cstore_optab
, UNKNOWN
);
6116 init_optab (ctrap_optab
, UNKNOWN
);
6118 init_optab (storent_optab
, UNKNOWN
);
  init_optab (cmp_optab, UNKNOWN);
  init_optab (ucmp_optab, UNKNOWN);

  init_optab (eq_optab, EQ);
  init_optab (ne_optab, NE);
  init_optab (gt_optab, GT);
  init_optab (ge_optab, GE);
  init_optab (lt_optab, LT);
  init_optab (le_optab, LE);
  init_optab (unord_optab, UNORDERED);

  init_optab (neg_optab, NEG);
  init_optab (ssneg_optab, SS_NEG);
  init_optab (usneg_optab, US_NEG);
  init_optabv (negv_optab, NEG);
  init_optab (abs_optab, ABS);
  init_optabv (absv_optab, ABS);
  init_optab (addcc_optab, UNKNOWN);
  init_optab (one_cmpl_optab, NOT);
  init_optab (bswap_optab, BSWAP);
  init_optab (ffs_optab, FFS);
  init_optab (clz_optab, CLZ);
  init_optab (ctz_optab, CTZ);
  init_optab (clrsb_optab, CLRSB);
  init_optab (popcount_optab, POPCOUNT);
  init_optab (parity_optab, PARITY);
  init_optab (sqrt_optab, SQRT);
  init_optab (floor_optab, UNKNOWN);
  init_optab (ceil_optab, UNKNOWN);
  init_optab (round_optab, UNKNOWN);
  init_optab (btrunc_optab, UNKNOWN);
  init_optab (nearbyint_optab, UNKNOWN);
  init_optab (rint_optab, UNKNOWN);
  init_optab (sincos_optab, UNKNOWN);
  init_optab (sin_optab, UNKNOWN);
  init_optab (asin_optab, UNKNOWN);
  init_optab (cos_optab, UNKNOWN);
  init_optab (acos_optab, UNKNOWN);
  init_optab (exp_optab, UNKNOWN);
  init_optab (exp10_optab, UNKNOWN);
  init_optab (exp2_optab, UNKNOWN);
  init_optab (expm1_optab, UNKNOWN);
  init_optab (ldexp_optab, UNKNOWN);
  init_optab (scalb_optab, UNKNOWN);
  init_optab (significand_optab, UNKNOWN);
  init_optab (logb_optab, UNKNOWN);
  init_optab (ilogb_optab, UNKNOWN);
  init_optab (log_optab, UNKNOWN);
  init_optab (log10_optab, UNKNOWN);
  init_optab (log2_optab, UNKNOWN);
  init_optab (log1p_optab, UNKNOWN);
  init_optab (tan_optab, UNKNOWN);
  init_optab (atan_optab, UNKNOWN);
  init_optab (copysign_optab, UNKNOWN);
  init_optab (signbit_optab, UNKNOWN);

  init_optab (isinf_optab, UNKNOWN);

  init_optab (strlen_optab, UNKNOWN);
  init_optab (push_optab, UNKNOWN);

  init_optab (reduc_smax_optab, UNKNOWN);
  init_optab (reduc_umax_optab, UNKNOWN);
  init_optab (reduc_smin_optab, UNKNOWN);
  init_optab (reduc_umin_optab, UNKNOWN);
  init_optab (reduc_splus_optab, UNKNOWN);
  init_optab (reduc_uplus_optab, UNKNOWN);

  init_optab (ssum_widen_optab, UNKNOWN);
  init_optab (usum_widen_optab, UNKNOWN);
  init_optab (sdot_prod_optab, UNKNOWN);
  init_optab (udot_prod_optab, UNKNOWN);

  init_optab (vec_extract_optab, UNKNOWN);
  init_optab (vec_extract_even_optab, UNKNOWN);
  init_optab (vec_extract_odd_optab, UNKNOWN);
  init_optab (vec_interleave_high_optab, UNKNOWN);
  init_optab (vec_interleave_low_optab, UNKNOWN);
  init_optab (vec_set_optab, UNKNOWN);
  init_optab (vec_init_optab, UNKNOWN);
  init_optab (vec_shl_optab, UNKNOWN);
  init_optab (vec_shr_optab, UNKNOWN);
  init_optab (vec_realign_load_optab, UNKNOWN);
  init_optab (movmisalign_optab, UNKNOWN);
  init_optab (vec_widen_umult_hi_optab, UNKNOWN);
  init_optab (vec_widen_umult_lo_optab, UNKNOWN);
  init_optab (vec_widen_smult_hi_optab, UNKNOWN);
  init_optab (vec_widen_smult_lo_optab, UNKNOWN);
  init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
  init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
  init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
  init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
  init_optab (vec_pack_trunc_optab, UNKNOWN);
  init_optab (vec_pack_usat_optab, UNKNOWN);
  init_optab (vec_pack_ssat_optab, UNKNOWN);
  init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
  init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);

  init_optab (powi_optab, UNKNOWN);
  init_convert_optab (sext_optab, SIGN_EXTEND);
  init_convert_optab (zext_optab, ZERO_EXTEND);
  init_convert_optab (trunc_optab, TRUNCATE);
  init_convert_optab (sfix_optab, FIX);
  init_convert_optab (ufix_optab, UNSIGNED_FIX);
  init_convert_optab (sfixtrunc_optab, UNKNOWN);
  init_convert_optab (ufixtrunc_optab, UNKNOWN);
  init_convert_optab (sfloat_optab, FLOAT);
  init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
  init_convert_optab (lrint_optab, UNKNOWN);
  init_convert_optab (lround_optab, UNKNOWN);
  init_convert_optab (lfloor_optab, UNKNOWN);
  init_convert_optab (lceil_optab, UNKNOWN);

  init_convert_optab (fract_optab, FRACT_CONVERT);
  init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
  init_convert_optab (satfract_optab, SAT_FRACT);
  init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
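
  /* Note on the calls above (illustrative commentary): init_optab also
     records the rtx code in code_to_optab, so later expansion code can
     map an rtx code back to its optab, e.g.

	 optab op = code_to_optab[(int) SMIN];   /* smin_optab */

     The UNKNOWN entries are optabs that have no rtx code of their own
     and are only ever reached by name; init_optabv and
     init_convert_optab follow the same pattern for vector-shift and
     conversion optabs.  */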
  /* Fill in the optabs with the insns we support.  */

  /* Initialize the optabs with the names of the library functions.  */
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssadd_optab->libcall_basename = "ssadd";
  ssadd_optab->libcall_suffix = '3';
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
  usadd_optab->libcall_basename = "usadd";
  usadd_optab->libcall_suffix = '3';
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  sssub_optab->libcall_basename = "sssub";
  sssub_optab->libcall_suffix = '3';
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
  ussub_optab->libcall_basename = "ussub";
  ussub_optab->libcall_suffix = '3';
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssmul_optab->libcall_basename = "ssmul";
  ssmul_optab->libcall_suffix = '3';
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
  usmul_optab->libcall_basename = "usmul";
  usmul_optab->libcall_suffix = '3';
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  ssdiv_optab->libcall_basename = "ssdiv";
  ssdiv_optab->libcall_suffix = '3';
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  usdiv_optab->libcall_basename = "usdiv";
  usdiv_optab->libcall_suffix = '3';
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
  ssashl_optab->libcall_basename = "ssashl";
  ssashl_optab->libcall_suffix = '3';
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
  usashl_optab->libcall_basename = "usashl";
  usashl_optab->libcall_suffix = '3';
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ssneg_optab->libcall_basename = "ssneg";
  ssneg_optab->libcall_suffix = '2';
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
  usneg_optab->libcall_basename = "usneg";
  usneg_optab->libcall_suffix = '2';
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  clrsb_optab->libcall_basename = "clrsb";
  clrsb_optab->libcall_suffix = '2';
  clrsb_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;
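
  /* A sketch of the naming convention driven by the three fields set
     above: when a libfunc is first requested for a mode, the
     libcall_gen hook (gen_int_libfunc and friends) composes "__", the
     basename, the mode name and the suffix, so add_optab in SImode
     typically resolves to "__addsi3" and ffs_optab in DImode to
     "__ffsdi2".  Decimal float and targets with a GNU prefix deviate
     from this; see the gen_*_libfunc helpers for the exact rules.  */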
  /* Comparison libcalls for integers MUST come in pairs.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;

  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;

  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* sext_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;

  /* Conversions for fixed-point modes and other modes.  */
  fract_optab->libcall_basename = "fract";
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
  satfract_optab->libcall_basename = "satfract";
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
  fractuns_optab->libcall_basename = "fractuns";
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
  satfractuns_optab->libcall_basename = "satfractuns";
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
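
  /* Illustrative note: conversion optabs name their libfuncs from the
     two modes involved rather than from a single mode.  For example,
     the "extend" basename of sext_optab yields "__extendsfdf2" for an
     SFmode-to-DFmode extension, and the "fix" basename of sfix_optab
     yields "__fixdfsi" for DFmode-to-SImode.  These examples assume the
     standard libgcc names; the gen_*_conv_libfunc hooks above handle
     the general cases.  */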
  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
		       "ffs");

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  if (targetm.libfunc_gnu_prefix)
    {
      set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
    }
  else
    {
      set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
    }

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node),
		       "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
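
  /* A sketch of what a target hook might do with the entry point above:
     a hypothetical target whose support library provides a faster
     32-bit divide could write, in its init_libfuncs hook,

	 set_optab_libfunc (sdiv_optab, SImode, "__fast_divsi3");

     "__fast_divsi3" is an invented name used purely for illustration;
     real targets substitute whatever their runtime actually exports.  */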
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	optab o;
	rtx l;

	o = &optab_table[i];
	l = optab_libfunc (o, (enum machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (o->code),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  convert_optab o;
	  rtx l;

	  o = &convert_optab_table[i];
	  l = convert_optab_libfunc (o, (enum machine_mode) j,
				     (enum machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (o->code),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn, trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			  tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
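
/* Usage sketch (hypothetical caller; variable names are illustrative):

       rtx seq = gen_cond_trap (EQ, reg, const0_rtx, const0_rtx);
       if (seq)
	 emit_insn (seq);

   A zero trap code is the conservative choice, since the check above
   shows that some targets accept nothing else; a null return means the
   caller must expand the comparison and trap some other way.  */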
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  struct expand_operand ops[2];
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);

  create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
  create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
/* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
   of the CPU.  SEL may be NULL, which stands for an unknown constant.  */

bool
can_vec_perm_expr_p (tree type, tree sel)
{
  enum machine_mode mode, qimode;
  mode = TYPE_MODE (type);

  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  if (sel == NULL || TREE_CODE (sel) == VECTOR_CST)
    {
      if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
	  && (sel == NULL
	      || targetm.vectorize.vec_perm_const_ok (type, sel)))
	return true;
    }

  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
  if (!VECTOR_MODE_P (qimode))
    return false;

  /* ??? For completeness, we ought to check the QImode version of
     vec_perm_const_optab.  But all users of this implicit lowering
     feature implement the variable vec_perm_optab.  */
  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of non-constant permutations,
     we need to support shifts and adds.  */
  if (sel != NULL && TREE_CODE (sel) != VECTOR_CST)
    {
      if (GET_MODE_UNIT_SIZE (mode) > 2
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
	return false;
      if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
	return false;
    }

  return true;
}
/* A subroutine of expand_vec_perm_expr for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_expr_1 (enum insn_code icode, rtx target,
			rtx v0, rtx v1, rtx sel)
{
  enum machine_mode tmode = GET_MODE (target);
  enum machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Generate instructions for VEC_PERM_EXPR given its type and three
   operands.  */

rtx
expand_vec_perm_expr (tree type, tree v0, tree v1, tree sel, rtx target)
{
  enum insn_code icode;
  enum machine_mode mode = TYPE_MODE (type);
  enum machine_mode qimode;
  rtx v0_rtx, v1_rtx, sel_rtx, *vec, vt, tmp;
  unsigned int i, j, w, e, u;

  if (!target)
    target = gen_reg_rtx (mode);
  v0_rtx = expand_normal (v0);
  if (operand_equal_p (v0, v1, 0))
    v1_rtx = v0_rtx;
  else
    v1_rtx = expand_normal (v1);
  sel_rtx = expand_normal (sel);

  /* If the input is a constant, expand it specially.  */
  if (CONSTANT_P (sel_rtx))
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing
	  && targetm.vectorize.vec_perm_const_ok (TREE_TYPE (v0), sel)
	  && (tmp = expand_vec_perm_expr_1 (icode, target, v0_rtx,
					    v1_rtx, sel_rtx)) != NULL)
	return tmp;
    }

  /* Otherwise fall back to a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing
      && (tmp = expand_vec_perm_expr_1 (icode, target, v0_rtx,
					v1_rtx, sel_rtx)) != NULL)
    return tmp;

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
  if (!VECTOR_MODE_P (qimode))
    return NULL_RTX;

  /* ??? For completeness, we ought to check the QImode version of
     vec_perm_const_optab.  But all users of this implicit lowering
     feature implement the variable vec_perm_optab.  */
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);
  vec = XALLOCAVEC (rtx, w);

  if (CONSTANT_P (sel_rtx))
    {
      for (i = 0; i < e; ++i)
	{
	  unsigned int this_e = INTVAL (XVECEXP (sel_rtx, 0, i));
	  this_e &= 2 * e - 1;
	  this_e *= u;

	  for (j = 0; j < u; ++j)
	    vec[i * u + j] = GEN_INT (this_e + j);
	}
      sel_rtx = gen_rtx_CONST_VECTOR (qimode, gen_rtvec_v (w, vec));
    }
  else
    {
      /* Multiply each element by its byte size.  */
      if (u == 2)
	sel_rtx = expand_simple_binop (mode, PLUS, sel_rtx, sel_rtx,
				       sel_rtx, 0, OPTAB_DIRECT);
      else
	sel_rtx = expand_simple_binop (mode, ASHIFT, sel_rtx,
				       GEN_INT (exact_log2 (u)),
				       sel_rtx, 0, OPTAB_DIRECT);
      gcc_assert (sel_rtx);

      /* Broadcast the low byte of each element into each of its bytes.  */
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  vec[i] = GEN_INT (this_e);
	}
      vt = gen_rtx_CONST_VECTOR (qimode, gen_rtvec_v (w, vec));
      sel_rtx = gen_lowpart (qimode, sel_rtx);
      sel_rtx = expand_vec_perm_expr_1 (icode, gen_reg_rtx (qimode),
					sel_rtx, sel_rtx, vt);
      gcc_assert (sel_rtx != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      for (i = 0; i < w; ++i)
	vec[i] = GEN_INT (i % u);
      vt = gen_rtx_CONST_VECTOR (qimode, gen_rtvec_v (w, vec));
      sel_rtx = expand_simple_binop (qimode, PLUS, sel_rtx, vt,
				     NULL_RTX, 0, OPTAB_DIRECT);
      gcc_assert (sel_rtx);
    }

  tmp = expand_vec_perm_expr_1 (icode, gen_lowpart (qimode, target),
				gen_lowpart (qimode, v0_rtx),
				gen_lowpart (qimode, v1_rtx), sel_rtx);
  gcc_assert (tmp != NULL);

  return gen_lowpart (mode, tmp);
}
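
/* Worked example of the byte-wise lowering above, assuming a V4SImode
   permutation, so w = 16, e = 4 and u = 4: a constant selector element
   with value 2 is masked with 2*e - 1 = 7, scaled by u to byte index 8,
   and expanded to the four byte selectors 8, 9, 10, 11.  A non-constant
   selector is instead shifted left by exact_log2 (4) = 2, its low byte
   is broadcast within each element, and the per-byte offsets 0..3 are
   added, producing the same byte-level mask at run time.  */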
/* Return insn code for a conditional operator with a comparison in
   mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE.  */

static inline enum insn_code
get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
{
  enum insn_code icode = CODE_FOR_nothing;
  if (uns)
    icode = convert_optab_handler (vcondu_optab, vmode, cmode);
  else
    icode = convert_optab_handler (vcond_optab, vmode, cmode);
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for a vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
{
  enum machine_mode value_mode = TYPE_MODE (value_type);
  enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
      || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
      || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
			  TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (vec_cond_type);
  enum machine_mode cmp_op_mode;
  bool unsignedp;

  gcc_assert (COMPARISON_CLASS_P (op0));

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
  cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    return 0;

  comparison = vector_compare_rtx (op0, unsignedp, icode);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
			       rtx target, enum insn_code icode)
{
  struct expand_operand ops[4];
  enum machine_mode mode = GET_MODE (mem);

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], mem);
  /* OLD_VAL and NEW_VAL may have been promoted to a wider mode.
     Shrink them if so.  */
  create_convert_operand_to (&ops[2], old_val, mode, true);
  create_convert_operand_to (&ops[3], new_val, mode, true);
  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}

/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode
    = direct_optab_handler (sync_compare_and_swap_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
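
/* Usage sketch (hypothetical caller): the __sync_val_compare_and_swap
   builtins are expanded along the lines of

       rtx res = expand_val_compare_and_swap (mem, old_val, new_val, target);
       if (res == NULL_RTX)
	 ... fall back to a library call ...

   TARGET may be NULL_RTX, in which case the operand machinery below
   allocates a fresh pseudo for the result.  */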
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}

/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, seq, cc_reg;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  do_pending_stack_adjust ();
  do
    {
      start_sequence ();
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      cc_reg = NULL_RTX;
      if (subtarget == NULL_RTX)
	{
	  end_sequence ();
	  return NULL_RTX;
	}

      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      seq = get_insns ();
      end_sequence ();

      /* We might be comparing against an old value.  Try again.  :-( */
      if (!cc_reg && MEM_P (old_val))
	{
	  seq = NULL_RTX;
	  old_val = force_reg (mode, old_val);
	}
    }
  while (!seq);

  emit_insn (seq);
  if (cc_reg)
    return emit_store_flag_force (target, EQ, cc_reg, const0_rtx,
				  VOIDmode, 0, 1);
  else
    return emit_store_flag_force (target, EQ, subtarget, old_val,
				  VOIDmode, 1, 1);
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget, cc_reg;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
					     cmp_reg, icode);
  if (subtarget == NULL_RTX)
    return false;

  cc_reg = NULL_RTX;
  if (have_insn_for (COMPARE, CCmode))
    note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
  if (cc_reg)
    {
      cmp_reg = cc_reg;
      old_reg = const0_rtx;
    }
  else if (subtarget != cmp_reg)
    emit_move_insn (cmp_reg, subtarget);

  /* ??? Mark this jump predicted not taken?  */
  emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx,
			   GET_MODE (cmp_reg), 1, label);
  return true;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = direct_optab_handler (sync_add_optab, mode);
      break;
    case IOR:
      icode = direct_optab_handler (sync_ior_optab, mode);
      break;
    case XOR:
      icode = direct_optab_handler (sync_xor_optab, mode);
      break;
    case AND:
      icode = direct_optab_handler (sync_and_optab, mode);
      break;
    case NOT:
      icode = direct_optab_handler (sync_nand_optab, mode);
      break;

    case MINUS:
      icode = direct_optab_handler (sync_sub_optab, mode);
      if (icode == CODE_FOR_nothing || CONST_INT_P (val))
	{
	  icode = direct_optab_handler (sync_add_optab, mode);
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];

      create_fixed_operand (&ops[0], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[1], val, mode, true);
      if (maybe_expand_insn (icode, 2, ops))
	return const0_rtx;
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				  true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}
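
/* Illustrative note: the __sync_* builtins whose result is ignored reach
   this routine; __sync_fetch_and_add, for instance, arrives roughly as

       expand_sync_operation (mem, val, PLUS);

   and on targets without a direct sync_add pattern the compare-and-swap
   loop above supplies the atomicity.  The call shown sketches the
   interface and is not a quote of the real caller.  */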
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = direct_optab_handler (sync_old_add_optab, mode);
      new_code = direct_optab_handler (sync_new_add_optab, mode);
      break;
    case IOR:
      old_code = direct_optab_handler (sync_old_ior_optab, mode);
      new_code = direct_optab_handler (sync_new_ior_optab, mode);
      break;
    case XOR:
      old_code = direct_optab_handler (sync_old_xor_optab, mode);
      new_code = direct_optab_handler (sync_new_xor_optab, mode);
      break;
    case AND:
      old_code = direct_optab_handler (sync_old_and_optab, mode);
      new_code = direct_optab_handler (sync_new_and_optab, mode);
      break;
    case NOT:
      old_code = direct_optab_handler (sync_old_nand_optab, mode);
      new_code = direct_optab_handler (sync_new_nand_optab, mode);
      break;

    case MINUS:
      old_code = direct_optab_handler (sync_old_sub_optab, mode);
      new_code = direct_optab_handler (sync_new_sub_optab, mode);
      if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	  || CONST_INT_P (val))
	{
	  old_code = direct_optab_handler (sync_old_add_optab, mode);
	  new_code = direct_optab_handler (sync_new_add_optab, mode);
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  icode = old_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  icode = new_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[2], val, mode, true);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  target = ops[0].value;

	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (code == PLUS)
		code = MINUS;
	      else if (code == MINUS)
		code = PLUS;

	      if (code == NOT)
		{
		  target = expand_simple_binop (mode, AND, target, val,
						NULL_RTX, true,
						OPTAB_LIB_WIDEN);
		  target = expand_simple_unop (mode, code, target,
					       NULL_RTX, true);
		}
	      else
		target = expand_simple_binop (mode, code, target, val,
					      NULL_RTX, true,
					      OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      t1 = t0;
      if (!after)
	emit_move_insn (target, t0);
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				  true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value stored
   in MEM is target defined.  TARGET is an optional place to stick the
   return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the test-and-set directly, great.  */
  icode = direct_optab_handler (sync_lock_test_and_set_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[2], val, mode, true);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}
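
/* Typical use of this predicate check (op2 and mode stand for whatever
   the caller has in scope): decide whether an operand must be copied
   before being handed to GEN_FCN, e.g.

       if (!insn_operand_matches (icode, 2, op2))
	 op2 = force_reg (mode, op2);

   which is the same idiom expand_vec_perm_expr_1 applies to V0 above.  */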
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  enum machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a register.  The check for side
     effects is important because force_reg cannot handle things
     like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem
      && MEM_P (op->value)
      && !side_effects_p (XEXP (op->value, 0)))
    {
      rtx addr, mem, last;

      last = get_last_insn ();
      addr = force_reg (Pmode, XEXP (op->value, 0));
      mem = replace_equiv_address (op->value, addr);
      if (insn_operand_matches (icode, opno, mem))
	{
	  op->value = mem;
	  return true;
	}
      delete_insns_since (last);
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  struct expand_operand *op)
{
  enum machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
	{
	  op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
	  mode = imode;
	}
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
	return true;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
			   unsigned int nops, struct expand_operand *ops)
{
  rtx last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
	delete_insns_since (last);
	return false;
      }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL_RTX;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    }
  gcc_unreachable ();
}
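
/* Sketch of the typical calling sequence for this operand API (TARGET,
   SRC and ICODE stand for whatever the caller has on hand):

       struct expand_operand ops[2];
       create_output_operand (&ops[0], target, mode);
       create_input_operand (&ops[1], src, mode);
       if (maybe_expand_insn (icode, 2, ops))
	 return ops[0].value;

   maybe_gen_insn legitimizes each operand first, so callers normally do
   not need to force SRC into a register themselves.  */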
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
		   struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
			struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
	     struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
		  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}

#include "gt-optabs.h"