/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
struct target_optabs default_target_optabs;
struct target_libfuncs default_target_libfuncs;

struct target_optabs *this_target_optabs = &default_target_optabs;
struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;

#define libfunc_hash \
  (this_target_libfuncs->x_libfunc_hash)
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   enum machine_mode *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Prefixes for the current version of decimal floating point (BID vs. DPD).  */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Used for libfunc_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}
/* Used for libfunc_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &convert_optab_table[0]);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e,
                                                           NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Return the libfunc corresponding to the operation defined by OPTAB in MODE.
   Trigger lazy initialization if needed; return NULL if no libfunc is
   available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab - &optab_table[0]);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    {
      if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
        {
          note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
          if (GET_MODE_SIZE (GET_MODE (op0))
              > GET_MODE_SIZE (GET_MODE (target)))
            note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                       note, GET_MODE (op0));
          else
            note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                       note, GET_MODE (op0));
        }
      else
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
    }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
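
/* For illustration: if INSNS expands "target = op0 + op1" as a multi-insn
   sequence, the last insn that sets TARGET ends up carrying a note such as

       REG_EQUAL: (plus:SI (reg:SI <op0>) (const_int 4))

   so later RTL passes may treat TARGET as equal to that whole expression
   even though it was computed piecewise.  (Hypothetical SImode example,
   not taken from a real dump.)  */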
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static enum machine_mode
widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
{
  enum machine_mode m0 = GET_MODE (op0);
  enum machine_mode m1 = GET_MODE (op1);
  enum machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->SI insn, then return SI->DI, if that exists.
   If PERMIT_NON_WIDENING is nonzero then this can be used with
   non-widening optabs also.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
                                      enum machine_mode from_mode,
                                      int permit_non_widening,
                                      enum machine_mode *found_mode)
{
  for (; (permit_non_widening || from_mode != to_mode)
         && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
         && from_mode != VOIDmode;
       from_mode = GET_MODE_WIDER_MODE (from_mode))
    {
      enum insn_code handler = widening_optab_handler (op, to_mode,
                                                       from_mode);

      if (handler != CODE_FOR_nothing)
        {
          if (found_mode)
            *found_mode = from_mode;
          return handler;
        }
    }

  return CODE_FOR_nothing;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
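
/* For illustration: widening a QImode register to SImode with NO_EXTEND
   nonzero typically yields the paradoxical subreg (subreg:SI (reg:QI N) 0),
   leaving the upper bits undefined; with NO_EXTEND zero the operand goes
   through convert_modes and a real zero- or sign-extension is emitted.
   (Hypothetical example assuming a 32-bit SImode target.)  */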
/* Return the optab used for computing the operation given by the tree code,
   CODE and the tree TYPE.  This function is not always usable (for example, it
   cannot give complete results for multiplication or division) but probably
   ought to be relied on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? NULL : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from the input operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from the input operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from the output operand.  */
      return TYPE_UNSIGNED (type)
             ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case VEC_EXTRACT_EVEN_EXPR:
      return vec_extract_even_optab;

    case VEC_EXTRACT_ODD_EXPR:
      return vec_extract_odd_optab;

    case VEC_INTERLEAVE_HIGH_EXPR:
      return vec_interleave_high_optab;

    case VEC_INTERLEAVE_LOW_EXPR:
      return vec_interleave_low_optab;

    default:
      return NULL;
    }
}
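
/* Usage sketch (illustrative, not part of the original file): a typical
   caller maps a tree code to an optab and then checks for a handler:

     optab op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
     if (op && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
       ... expand directly ...

   The SUBTYPE argument only matters for vector shifts and rotates, where
   optab_vector selects the vector-shifted-by-vector form and optab_scalar
   the vector-shifted-by-scalar form.  */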
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
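
/* For illustration: for a widening-dot-product (DOT_PROD_EXPR) the caller
   passes OP0 and OP1 in the narrow element type and the accumulator as
   WIDE_OP, so the expand_operand array built above is
   { output, op0, op1, wide_op } and expand_insn receives four operands;
   for a type-promotion (vec-unpack) only { output, op0 } are used.  */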
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
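
/* Usage sketch (illustrative only): expanding a fused multiply-add through
   the fma optab might look roughly like

     if (optab_handler (fma_optab, mode) != CODE_FOR_nothing)
       result = expand_ternary_op (mode, fma_optab, a, b, c, target, 0);

   where a, b, c and target are rtxes already of mode MODE; the helper
   asserts that a handler exists, so callers check optab_handler first.  */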
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (sepops ops, rtx target)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (ops->type);
  tree vec_oprnd = ops->op0;
  tree shift_oprnd = ops->op1;
  optab shift_optab;

  switch (ops->code)
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (shift_optab, mode);
  gcc_assert (icode != CODE_FOR_nothing);

  rtx_op1 = expand_normal (vec_oprnd);
  rtx_op2 = expand_normal (shift_oprnd);

  create_output_operand (&eops[0], target, mode);
  create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
  create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
  expand_insn (icode, 3, eops);

  return eops[0].value;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (enum machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
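
/* For illustration: broadcasting (const_int 7) into V4SImode returns the
   constant (const_vector:V4SI [7 7 7 7]) directly, while broadcasting a
   register goes through the target's vec_init pattern and returns a fresh
   V4SImode pseudo, or NULL if the target provides no vec_init handler.  */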
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else if (!force_expand_binop (word_mode, binoptab,
                                    outof_input, GEN_INT (BITS_PER_WORD - 1),
                                    outof_target, unsignedp, methods))
        return false;
    }
  return true;
}
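
/* Worked example (illustrative, assuming BITS_PER_WORD == 32): for a 64-bit
   arithmetic right shift whose count is known to be >= 32, INTO_TARGET
   receives OUTOF_INPUT >> SUPERWORD_OP1 (i.e. high-word >> (count - 32))
   and OUTOF_TARGET is filled with copies of the sign bit, that is
   OUTOF_INPUT >> 31; for logical shifts OUTOF_TARGET is simply zeroed.  */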
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
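
/* Worked example (illustrative, BITS_PER_WORD == 32, left shift by a
   variable count N < 32): the routine above computes

     INTO_TARGET  = (INTO_INPUT << N) | (OUTOF_INPUT >> (32 - N));
     OUTOF_TARGET = OUTOF_INPUT << N;

   Because shifting by exactly 32 would be unsafe, the carried bits are
   formed as (OUTOF_INPUT >> 1) >> (31 - N), or equivalently
   (OUTOF_INPUT >> 1) >> (~N & 31) when shift counts are known to be
   truncated, which is what the reverse_unsigned_shift/TMP code does.  */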
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
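
/* Summary example (illustrative pseudo-code, assuming BITS_PER_WORD == 32):
   for "x >> n" with a DImode X split into HIGH/LOW words and a non-constant
   N, the branchy last-resort form generated above is roughly

     if (n - 32 < 0) goto subword;
     low'  = high >> (n - 32);  high' = sign-or-zero fill;  goto done;
   subword:
     low'  = (low >> n) | (high << (32 - n));  high' = high >> n;
   done:

   with the conditional-move variant computing both arms and selecting.  */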
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                   _______________________
    (2a)          [__op0_low__*__op1_high_]
                   _______________________
    (2b)          [__op0_high_*__op1_low__]
         _______________________
    (3)  [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
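
/* Worked sketch (illustrative only, not used by the compiler): on a target
   with 32-bit words the doubleword product computed above corresponds to

     uint32_t a0 = op0_low,  a1 = op0_high;
     uint32_t b0 = op1_low,  b1 = op1_high;
     uint64_t p   = (uint64_t) a0 * b0;      /+ (1): widening multiply        +/
     uint32_t adj = a1 * b0 + a0 * b1;       /+ (2a) + (2b), non-widening     +/
     /+ final result: low word of p unchanged, high word of p += adj         +/

   (nested comment markers written as /+ +/ here); the +op1_low / +op0_low
   corrections described above are applied to a1 and b1 first when only a
   signed widening multiply is available.  */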
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
1301 shift_optab_p (optab binoptab
)
1303 switch (binoptab
->code
)
1319 /* Return true if BINOPTAB implements a commutative binary operation. */
1322 commutative_optab_p (optab binoptab
)
1324 return (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1325 || binoptab
== smul_widen_optab
1326 || binoptab
== umul_widen_optab
1327 || binoptab
== smul_highpart_optab
1328 || binoptab
== umul_highpart_optab
);
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx last)
{
  enum machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  enum machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx swap;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value, binoptab->code,
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum machine_mode wider_mode;
  rtx temp;
  rtx entry_last = get_last_insn ();
  rtx last;
= GET_MODE_CLASS (mode
);
1503 /* If subtracting an integer constant, convert this into an addition of
1504 the negated constant. */
1506 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1508 op1
= negate_rtx (mode
, op1
);
1509 binoptab
= add_optab
;
1512 /* Record where to delete back to if we backtrack. */
1513 last
= get_last_insn ();
1515 /* If we can do it with a three-operand insn, do so. */
1517 if (methods
!= OPTAB_MUST_WIDEN
1518 && find_widening_optab_handler (binoptab
, mode
,
1519 widened_mode (mode
, op0
, op1
), 1)
1520 != CODE_FOR_nothing
)
1522 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1523 unsignedp
, methods
, last
);
1528 /* If we were trying to rotate, and that didn't work, try rotating
1529 the other direction before falling back to shifts and bitwise-or. */
1530 if (((binoptab
== rotl_optab
1531 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1532 || (binoptab
== rotr_optab
1533 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1534 && mclass
== MODE_INT
)
1536 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1538 unsigned int bits
= GET_MODE_PRECISION (mode
);
1540 if (CONST_INT_P (op1
))
1541 newop1
= GEN_INT (bits
- INTVAL (op1
));
1542 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1543 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1545 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1546 GEN_INT (bits
), op1
,
1547 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1549 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1550 target
, unsignedp
, methods
, last
);

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
                                             : smul_widen_optab),
                                  GET_MODE_2XWIDER_MODE (mode), mode)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = NULL;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Certain vector operations can be implemented with vector permutation.  */
  if (VECTOR_MODE_P (mode))
    {
      enum tree_code tcode = ERROR_MARK;
      rtx sel;

      if (binoptab == vec_interleave_high_optab)
        tcode = VEC_INTERLEAVE_HIGH_EXPR;
      else if (binoptab == vec_interleave_low_optab)
        tcode = VEC_INTERLEAVE_LOW_EXPR;
      else if (binoptab == vec_extract_even_optab)
        tcode = VEC_EXTRACT_EVEN_EXPR;
      else if (binoptab == vec_extract_odd_optab)
        tcode = VEC_EXTRACT_ODD_EXPR;

      if (tcode != ERROR_MARK
          && can_vec_perm_for_code_p (tcode, mode, &sel))
        {
          temp = expand_vec_perm (mode, op0, op1, sel, target);
          gcc_assert (temp != NULL);
          return temp;
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 GET_MODE_WIDER_MODE (wider_mode),
                                                 mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    {
      temp = op1;
      op1 = op0;
      op0 = temp;
    }

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
      else
        delete_insns_since (last);
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
1949 /* These can be done a word at a time by propagating carries. */
1950   if ((binoptab == add_optab || binoptab == sub_optab)
1951       && mclass == MODE_INT
1952       && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1953       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1956       optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1957       const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1958       rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1959       rtx xop0, xop1, xtarget;
1961       /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
1962          value is one of those, use it.  Otherwise, use 1 since it is the
1963          one easiest to get.  */
1964 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1965       int normalizep = STORE_FLAG_VALUE;
1970       /* Prepare the operands.  */
1971       xop0 = force_reg (mode, op0);
1972       xop1 = force_reg (mode, op1);
1974       xtarget = gen_reg_rtx (mode);
1976       if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1979       /* Indicate for flow that the entire target reg is being set.  */
1981       emit_clobber (xtarget);
1983       /* Do the actual arithmetic.  */
1984       for (i = 0; i < nwords; i++)
1986           int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1987           rtx target_piece = operand_subword (xtarget, index, 1, mode);
1988           rtx op0_piece = operand_subword_force (xop0, index, mode);
1989           rtx op1_piece = operand_subword_force (xop1, index, mode);
1992           /* Main add/subtract of the input operands.  */
1993           x = expand_binop (word_mode, binoptab,
1994                             op0_piece, op1_piece,
1995                             target_piece, unsignedp, next_methods);
2001               /* Store carry from main add/subtract.  */
2002               carry_out = gen_reg_rtx (word_mode);
2003               carry_out = emit_store_flag_force (carry_out,
2004                                                  (binoptab == add_optab
2007                                                  word_mode, 1, normalizep);
2014               /* Add/subtract previous carry to main result.  */
2015               newx = expand_binop (word_mode,
2016                                    normalizep == 1 ? binoptab : otheroptab,
2018                                    NULL_RTX, 1, next_methods);
2022                   /* Get out carry from adding/subtracting carry in.  */
2023                   rtx carry_tmp = gen_reg_rtx (word_mode);
2024                   carry_tmp = emit_store_flag_force (carry_tmp,
2025                                                      (binoptab == add_optab
2028                                                      word_mode, 1, normalizep);
2030                   /* Logical-ior the two poss. carry together.  */
2031                   carry_out = expand_binop (word_mode, ior_optab,
2032                                             carry_out, carry_tmp,
2033                                             carry_out, 0, next_methods);
2037               emit_move_insn (target_piece, newx);
2041               if (x != target_piece)
2042                 emit_move_insn (target_piece, x);
2045           carry_in = carry_out;
2048       if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2050           if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2051               || ! rtx_equal_p (target, xtarget))
2053               rtx temp = emit_move_insn (target, xtarget);
2055               set_unique_reg_note (temp,
2057                                    gen_rtx_fmt_ee (binoptab->code, mode,
2068       delete_insns_since (last);
2071 /* Attempt to synthesize double word multiplies using a sequence of word
2072 mode multiplications. We first attempt to generate a sequence using a
2073 more efficient unsigned widening multiply, and if that fails we then
2074 try using a signed widening multiply. */
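/* Illustrative identity (not from the original source): writing the
   two-word operands as op0 = h0*W + l0 and op1 = h1*W + l1, where W is
   2**BITS_PER_WORD, the truncated double-word product is
       op0 * op1 == l0*l1 + W * (h0*l1 + l0*h1)   (mod W*W)
   so one widening word multiply (l0*l1) plus two ordinary word
   multiplies and two additions into the high word suffice.  */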
2076   if (binoptab == smul_optab
2077       && mclass == MODE_INT
2078       && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2079       && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2080       && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2082       rtx product = NULL_RTX;
2083       if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2084           != CODE_FOR_nothing)
2086           product = expand_doubleword_mult (mode, op0, op1, target,
2089             delete_insns_since (last);
2092       if (product == NULL_RTX
2093           && widening_optab_handler (smul_widen_optab, mode, word_mode)
2094              != CODE_FOR_nothing)
2096           product = expand_doubleword_mult (mode, op0, op1, target,
2099             delete_insns_since (last);
2102       if (product != NULL_RTX)
2104           if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2106               temp = emit_move_insn (target ? target : product, product);
2107               set_unique_reg_note (temp,
2109                                    gen_rtx_fmt_ee (MULT, mode,
2117 /* It can't be open-coded in this mode.
2118 Use a library call if one is available and caller says that's ok. */
2120   libfunc = optab_libfunc (binoptab, mode);
2122       && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2126       enum machine_mode op1_mode = mode;
2131       if (shift_optab_p (binoptab))
2133           op1_mode = targetm.libgcc_shift_count_mode ();
2134           /* Specify unsigned here,
2135              since negative shift counts are meaningless.  */
2136           op1x = convert_to_mode (op1_mode, op1, 1);
2139       if (GET_MODE (op0) != VOIDmode
2140           && GET_MODE (op0) != mode)
2141         op0 = convert_to_mode (mode, op0, unsignedp);
2143       /* Pass 1 for NO_QUEUE so we don't lose any increments
2144          if the libcall is cse'd or moved.  */
2145       value = emit_library_call_value (libfunc,
2146                                        NULL_RTX, LCT_CONST, mode, 2,
2147                                        op0, mode, op1x, op1_mode);
2149       insns = get_insns ();
2152       target = gen_reg_rtx (mode);
2153       emit_libcall_block (insns, target, value,
2154                           gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2159   delete_insns_since (last);
2161 /* It can't be done in this mode. Can we do it in a wider mode? */
2163   if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2164          || methods == OPTAB_MUST_WIDEN))
2166       /* Caller says, don't even try.  */
2167       delete_insns_since (entry_last);
2171   /* Compute the value of METHODS to pass to recursive calls.
2172      Don't allow widening to be tried recursively.  */
2174   methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2176   /* Look for a wider mode of the same class for which it appears we can do
2179   if (CLASS_HAS_WIDER_MODES_P (mclass))
2181       for (wider_mode = GET_MODE_WIDER_MODE (mode);
2182            wider_mode != VOIDmode;
2183            wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2185           if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2187               || (methods == OPTAB_LIB
2188                   && optab_libfunc (binoptab, wider_mode)))
2190               rtx xop0 = op0, xop1 = op1;
2193               /* For certain integer operations, we need not actually extend
2194                  the narrow operands, as long as we will truncate
2195                  the results to the same narrowness.  */
2197               if ((binoptab == ior_optab || binoptab == and_optab
2198                    || binoptab == xor_optab
2199                    || binoptab == add_optab || binoptab == sub_optab
2200                    || binoptab == smul_optab || binoptab == ashl_optab)
2201                   && mclass == MODE_INT)
2204               xop0 = widen_operand (xop0, wider_mode, mode,
2205                                     unsignedp, no_extend);
2207               /* The second operand of a shift must always be extended.  */
2208               xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2209                                     no_extend && binoptab != ashl_optab);
2211               temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2212                                    unsignedp, methods);
2215                   if (mclass != MODE_INT
2216                       || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2219                         target = gen_reg_rtx (mode);
2220                       convert_move (target, temp, 0);
2224                     return gen_lowpart (mode, temp);
2227                 delete_insns_since (last);
2232   delete_insns_since (entry_last);
2236 /* Expand a binary operator which has both signed and unsigned forms.
2237 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2240 If we widen unsigned operands, we may use a signed wider operation instead
2241 of an unsigned wider operation, since the result would be the same. */
2244 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2245 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2246 enum optab_methods methods
)
2249 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2250 struct optab_d wide_soptab
;
2252 /* Do it without widening, if possible. */
2253 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2254 unsignedp
, OPTAB_DIRECT
);
2255 if (temp
|| methods
== OPTAB_DIRECT
)
2258 /* Try widening to a signed int. Make a fake signed optab that
2259 hides any signed insn for direct use. */
2260 wide_soptab
= *soptab
;
2261 set_optab_handler (&wide_soptab
, mode
, CODE_FOR_nothing
);
2262 /* We don't want to generate new hash table entries from this fake
2264 wide_soptab
.libcall_gen
= NULL
;
2266 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2267 unsignedp
, OPTAB_WIDEN
);
2269 /* For unsigned operands, try widening to an unsigned int. */
2270 if (temp
== 0 && unsignedp
)
2271 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2272 unsignedp
, OPTAB_WIDEN
);
2273 if (temp
|| methods
== OPTAB_WIDEN
)
2276 /* Use the right width libcall if that exists. */
2277 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2278 if (temp
|| methods
== OPTAB_LIB
)
2281 /* Must widen and use a libcall, use either signed or unsigned. */
2282 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2283 unsignedp
, methods
);
2287 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2288 unsignedp
, methods
);
2292 /* Generate code to perform an operation specified by UNOPPTAB
2293 on operand OP0, with two results to TARG0 and TARG1.
2294 We assume that the order of the operands for the instruction
2295 is TARG0, TARG1, OP0.
2297 Either TARG0 or TARG1 may be zero, but what that means is that
2298 the result is not actually wanted. We will generate it into
2299 a dummy pseudo-reg and discard it. They may not both be zero.
2301 Returns 1 if this operation can be performed; 0 if not. */
2304 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2307 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2308 enum mode_class mclass
;
2309 enum machine_mode wider_mode
;
2310 rtx entry_last
= get_last_insn ();
2313 mclass
= GET_MODE_CLASS (mode
);
2316 targ0
= gen_reg_rtx (mode
);
2318 targ1
= gen_reg_rtx (mode
);
2320 /* Record where to go back to if we fail. */
2321 last
= get_last_insn ();
2323 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2325 struct expand_operand ops
[3];
2326 enum insn_code icode
= optab_handler (unoptab
, mode
);
2328 create_fixed_operand (&ops
[0], targ0
);
2329 create_fixed_operand (&ops
[1], targ1
);
2330 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
2331 if (maybe_expand_insn (icode
, 3, ops
))
2335 /* It can't be done in this mode. Can we do it in a wider mode? */
2337 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2339 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2340 wider_mode
!= VOIDmode
;
2341 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2343 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2345 rtx t0
= gen_reg_rtx (wider_mode
);
2346 rtx t1
= gen_reg_rtx (wider_mode
);
2347 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2349 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2351 convert_move (targ0
, t0
, unsignedp
);
2352 convert_move (targ1
, t1
, unsignedp
);
2356 delete_insns_since (last
);
2361 delete_insns_since (entry_last
);
2365 /* Generate code to perform an operation specified by BINOPTAB
2366 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2367 We assume that the order of the operands for the instruction
2368 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2369 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2371 Either TARG0 or TARG1 may be zero, but what that means is that
2372 the result is not actually wanted. We will generate it into
2373 a dummy pseudo-reg and discard it. They may not both be zero.
2375 Returns 1 if this operation can be performed; 0 if not. */
2378 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2381 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2382 enum mode_class mclass
;
2383 enum machine_mode wider_mode
;
2384 rtx entry_last
= get_last_insn ();
2387 mclass
= GET_MODE_CLASS (mode
);
2390 targ0
= gen_reg_rtx (mode
);
2392 targ1
= gen_reg_rtx (mode
);
2394 /* Record where to go back to if we fail. */
2395 last
= get_last_insn ();
2397 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2399 struct expand_operand ops
[4];
2400 enum insn_code icode
= optab_handler (binoptab
, mode
);
2401 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2402 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2403 rtx xop0
= op0
, xop1
= op1
;
2405 /* If we are optimizing, force expensive constants into a register. */
2406 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2407 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2409 create_fixed_operand (&ops
[0], targ0
);
2410 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2411 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
2412 create_fixed_operand (&ops
[3], targ1
);
2413 if (maybe_expand_insn (icode
, 4, ops
))
2415 delete_insns_since (last
);
2418 /* It can't be done in this mode. Can we do it in a wider mode? */
2420 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2422 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2423 wider_mode
!= VOIDmode
;
2424 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2426 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2428 rtx t0
= gen_reg_rtx (wider_mode
);
2429 rtx t1
= gen_reg_rtx (wider_mode
);
2430 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2431 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2433 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2436 convert_move (targ0
, t0
, unsignedp
);
2437 convert_move (targ1
, t1
, unsignedp
);
2441 delete_insns_since (last
);
2446 delete_insns_since (entry_last
);
2450 /* Expand the two-valued library call indicated by BINOPTAB, but
2451 preserve only one of the values. If TARG0 is non-NULL, the first
2452 value is placed into TARG0; otherwise the second value is placed
2453 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2454 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2455 This routine assumes that the value returned by the library call is
2456 as if the return value was of an integral mode twice as wide as the
2457 mode of OP0. Returns 1 if the call was successful. */
2460 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2461 rtx targ0
, rtx targ1
, enum rtx_code code
)
2463 enum machine_mode mode
;
2464 enum machine_mode libval_mode
;
2469 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2470 gcc_assert (!targ0
!= !targ1
);
2472 mode
= GET_MODE (op0
);
2473 libfunc
= optab_libfunc (binoptab
, mode
);
2477 /* The value returned by the library function will have twice as
2478 many bits as the nominal MODE. */
2479 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2482 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2486 /* Get the part of VAL containing the value that we want. */
2487 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2488 targ0
? 0 : GET_MODE_SIZE (mode
));
2489 insns
= get_insns ();
2491 /* Move the into the desired location. */
2492 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2493 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2499 /* Wrapper around expand_unop which takes an rtx code to specify
2500 the operation to perform, not an optab pointer. All other
2501 arguments are the same. */
2503 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2504 rtx target
, int unsignedp
)
2506 optab unop
= code_to_optab
[(int) code
];
2509 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2515 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2517 A similar operation can be used for clrsb. UNOPTAB says which operation
2518 we are trying to expand. */
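/* Illustrative example (not from the original source): to compute clz on
   a HImode value when only SImode clz is available,
       clz:HI (x) == clz:SI (zero_extend:SI (x)) - (32 - 16)
   e.g. x = 0x00F0 gives clz:SI (0x000000F0) == 24, minus 16 == 8, which
   is the HImode count of leading zeros.  */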
2520 widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
2522   enum mode_class mclass = GET_MODE_CLASS (mode);
2523   if (CLASS_HAS_WIDER_MODES_P (mclass))
2525       enum machine_mode wider_mode;
2526       for (wider_mode = GET_MODE_WIDER_MODE (mode);
2527            wider_mode != VOIDmode;
2528            wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2530           if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2532               rtx xop0, temp, last;
2534               last = get_last_insn ();
2537                 target = gen_reg_rtx (mode);
2538               xop0 = widen_operand (op0, wider_mode, mode,
2539                                     unoptab != clrsb_optab, false);
2540               temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2541                                   unoptab != clrsb_optab);
2543                 temp = expand_binop (wider_mode, sub_optab, temp,
2544                                      GEN_INT (GET_MODE_PRECISION (wider_mode)
2545                                               - GET_MODE_PRECISION (mode)),
2546                                      target, true, OPTAB_DIRECT);
2548                 delete_insns_since (last);
2557 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2558 quantities, choosing which based on whether the high word is nonzero. */
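/* Illustrative example (not from the original source): for a DImode value
   on a 32-bit-word target,
       clz64 (x) == (hi != 0) ? clz32 (hi) : 32 + clz32 (lo)
   which is exactly the two-branch sequence emitted below.  */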
2560 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2562   rtx xop0 = force_reg (mode, op0);
2563   rtx subhi = gen_highpart (word_mode, xop0);
2564   rtx sublo = gen_lowpart (word_mode, xop0);
2565   rtx hi0_label = gen_label_rtx ();
2566   rtx after_label = gen_label_rtx ();
2567   rtx seq, temp, result;
2569   /* If we were not given a target, use a word_mode register, not a
2570      'mode' register.  The result will fit, and nobody is expecting
2571      anything bigger (the return type of __builtin_clz* is int).  */
2573     target = gen_reg_rtx (word_mode);
2575   /* In any case, write to a word_mode scratch in both branches of the
2576      conditional, so we can ensure there is a single move insn setting
2577      'target' to tag a REG_EQUAL note on.  */
2578   result = gen_reg_rtx (word_mode);
2582   /* If the high word is not equal to zero,
2583      then clz of the full value is clz of the high word.  */
2584   emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2585                            word_mode, true, hi0_label);
2587   temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2592     convert_move (result, temp, true);
2594   emit_jump_insn (gen_jump (after_label));
2597   /* Else clz of the full value is clz of the low word plus the number
2598      of bits in the high word.  */
2599   emit_label (hi0_label);
2601   temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2604   temp = expand_binop (word_mode, add_optab, temp,
2605                        GEN_INT (GET_MODE_BITSIZE (word_mode)),
2606                        result, true, OPTAB_DIRECT);
2610     convert_move (result, temp, true);
2612   emit_label (after_label);
2613   convert_move (target, result, true);
2618   add_equal_note (seq, target, CLZ, xop0, 0);
2630 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
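/* Illustrative example (not from the original source): a HImode bswap
   done via an SImode bswap,
       bswap:HI (0x1234) == 0x3412
       bswap:SI (0x00001234) == 0x34120000, and 0x34120000 >> 16 == 0x3412
   i.e. the widened result only needs a logical right shift by the
   difference in widths.  */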
2632 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2634   enum mode_class mclass = GET_MODE_CLASS (mode);
2635   enum machine_mode wider_mode;
2638   if (!CLASS_HAS_WIDER_MODES_P (mclass))
2641   for (wider_mode = GET_MODE_WIDER_MODE (mode);
2642        wider_mode != VOIDmode;
2643        wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2644     if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2649   last = get_last_insn ();
2651   x = widen_operand (op0, wider_mode, mode, true, true);
2652   x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2654   gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2655               && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2657     x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2658                       GET_MODE_BITSIZE (wider_mode)
2659                       - GET_MODE_BITSIZE (mode),
2665         target = gen_reg_rtx (mode);
2666       emit_move_insn (target, gen_lowpart (mode, x));
2669     delete_insns_since (last);
2674 /* Try calculating bswap as two bswaps of two word-sized operands. */
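/* Illustrative example (not from the original source): for a DImode bswap
   on a 32-bit-word target, byte-swap each word and swap the two words,
   e.g. x = 0x0102030405060708:
       bswap32 (low  0x05060708) == 0x08070605  -> new high word
       bswap32 (high 0x01020304) == 0x04030201  -> new low word
   giving 0x0807060504030201 == bswap64 (x).  */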
2677 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2681   t1 = expand_unop (word_mode, bswap_optab,
2682                     operand_subword_force (op, 0, mode), NULL_RTX, true);
2683   t0 = expand_unop (word_mode, bswap_optab,
2684                     operand_subword_force (op, 1, mode), NULL_RTX, true);
2686   if (target == 0 || !valid_multiword_target_p (target))
2687     target = gen_reg_rtx (mode);
2689     emit_clobber (target);
2690   emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2691   emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2696 /* Try calculating (parity x) as (and (popcount x) 1), where
2697 popcount can also be done in a wider mode. */
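/* Illustrative example (not from the original source):
       parity (0b1011) == popcount (0b1011) & 1 == 3 & 1 == 1
   and the popcount itself may be computed in any wider mode, since
   zero-extension does not add set bits.  */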
2699 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2701   enum mode_class mclass = GET_MODE_CLASS (mode);
2702   if (CLASS_HAS_WIDER_MODES_P (mclass))
2704       enum machine_mode wider_mode;
2705       for (wider_mode = mode; wider_mode != VOIDmode;
2706            wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2708           if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2710               rtx xop0, temp, last;
2712               last = get_last_insn ();
2715                 target = gen_reg_rtx (mode);
2716               xop0 = widen_operand (op0, wider_mode, mode, true, false);
2717               temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2720                 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2721                                      target, true, OPTAB_DIRECT);
2723                 delete_insns_since (last);
2732 /* Try calculating ctz(x) as K - clz(x & -x) ,
2733 where K is GET_MODE_PRECISION(mode) - 1.
2735 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2736 don't have to worry about what the hardware does in that case. (If
2737 the clz instruction produces the usual value at 0, which is K, the
2738 result of this code sequence will be -1; expand_ffs, below, relies
2739 on this. It might be nice to have it be K instead, for consistency
2740 with the (very few) processors that provide a ctz with a defined
2741 value, but that would take one more instruction, and it would be
2742 less convenient for expand_ffs anyway. */
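/* Illustrative example (not from the original source), for a 32-bit mode
   where K = 31:
       x       = 0b...101000   (ctz == 3)
       x & -x  = 0b...001000   (isolates the lowest set bit)
       clz (x & -x) == 28, and 31 - 28 == 3 == ctz (x).  */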
2745 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2749   if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2754   temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2756     temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2757                          true, OPTAB_DIRECT);
2759     temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2761     temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_PRECISION (mode) - 1),
2763                          true, OPTAB_DIRECT);
2773   add_equal_note (seq, temp, CTZ, op0, 0);
2779 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2780 else with the sequence used by expand_clz.
2782 The ffs builtin promises to return zero for a zero value and ctz/clz
2783 may have an undefined value in that case. If they do not give us a
2784 convenient value, we have to generate a test and branch. */
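/* Illustrative example (not from the original source):
       ffs (0b101000) == ctz (0b101000) + 1 == 4, while ffs (0) == 0,
   which is why a zero operand may need the test-and-branch correction
   emitted below when ctz/clz give no convenient value at 0.  */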
2786 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2788   HOST_WIDE_INT val = 0;
2789   bool defined_at_zero = false;
2792   if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2796       temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2800       defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2802   else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2805       temp = expand_ctz (mode, op0, 0);
2809       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2811           defined_at_zero = true;
2812           val = (GET_MODE_PRECISION (mode) - 1) - val;
2818   if (defined_at_zero && val == -1)
2819     /* No correction needed at zero.  */;
2822       /* We don't try to do anything clever with the situation found
2823          on some processors (eg Alpha) where ctz(0:mode) ==
2824          bitsize(mode).  If someone can think of a way to send N to -1
2825          and leave alone all values in the range 0..N-1 (where N is a
2826          power of two), cheaper than this test-and-branch, please add it.
2828          The test-and-branch is done after the operation itself, in case
2829          the operation sets condition codes that can be recycled for this.
2830          (This is true on i386, for instance.)  */
2832       rtx nonzero_label = gen_label_rtx ();
2833       emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2834                                mode, true, nonzero_label);
2836       convert_move (temp, GEN_INT (-1), false);
2837       emit_label (nonzero_label);
2840   /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
2841      to produce a value in the range 0..bitsize.  */
2842   temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2843                        target, false, OPTAB_DIRECT);
2850   add_equal_note (seq, temp, FFS, op0, 0);
2859 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2860 conditions, VAL may already be a SUBREG against which we cannot generate
2861 a further SUBREG. In this case, we expect forcing the value into a
2862 register will work around the situation. */
2865 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2866                            enum machine_mode imode)
2869   ret = lowpart_subreg (omode, val, imode);
2872       val = force_reg (imode, val);
2873       ret = lowpart_subreg (omode, val, imode);
2874       gcc_assert (ret != NULL);
2879 /* Expand a floating point absolute value or negation operation via a
2880 logical operation on the sign bit. */
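/* Illustrative example (not from the original source): for IEEE single
   precision the sign bit is bit 31, so with mask == 0x80000000
       neg (x)  ==  x ^ mask     (flip the sign bit)
       abs (x)  ==  x & ~mask    (clear the sign bit)
   which is what the code below does, word by word for wider modes.  */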
2883 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2884                    rtx op0, rtx target)
2886   const struct real_format *fmt;
2887   int bitpos, word, nwords, i;
2888   enum machine_mode imode;
2892   /* The format has to have a simple sign bit.  */
2893   fmt = REAL_MODE_FORMAT (mode);
2897   bitpos = fmt->signbit_rw;
2901   /* Don't create negative zeros if the format doesn't support them.  */
2902   if (code == NEG && !fmt->has_signed_zero)
2905   if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2907       imode = int_mode_for_mode (mode);
2908       if (imode == BLKmode)
2917       if (FLOAT_WORDS_BIG_ENDIAN)
2918         word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2920         word = bitpos / BITS_PER_WORD;
2921       bitpos = bitpos % BITS_PER_WORD;
2922       nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2925   mask = double_int_setbit (double_int_zero, bitpos);
2927     mask = double_int_not (mask);
2931       || (nwords > 1 && !valid_multiword_target_p (target)))
2932     target = gen_reg_rtx (mode);
2938       for (i = 0; i < nwords; ++i)
2940           rtx targ_piece = operand_subword (target, i, 1, mode);
2941           rtx op0_piece = operand_subword_force (op0, i, mode);
2945               temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2947                                    immed_double_int_const (mask, imode),
2948                                    targ_piece, 1, OPTAB_LIB_WIDEN);
2949               if (temp != targ_piece)
2950                 emit_move_insn (targ_piece, temp);
2953             emit_move_insn (targ_piece, op0_piece);
2956       insns = get_insns ();
2963       temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2964                            gen_lowpart (imode, op0),
2965                            immed_double_int_const (mask, imode),
2966                            gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2967       target = lowpart_subreg_maybe_copy (mode, temp, imode);
2969       set_unique_reg_note (get_last_insn (), REG_EQUAL,
2970                            gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2976 /* As expand_unop, but will fail rather than attempt the operation in a
2977 different mode or with a libcall. */
2979 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2982   if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2984       struct expand_operand ops[2];
2985       enum insn_code icode = optab_handler (unoptab, mode);
2986       rtx last = get_last_insn ();
2989       create_output_operand (&ops[0], target, mode);
2990       create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2991       pat = maybe_gen_insn (icode, 2, ops);
2994           if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2995               && ! add_equal_note (pat, ops[0].value, unoptab->code,
2996                                    ops[1].value, NULL_RTX))
2998               delete_insns_since (last);
2999               return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3004           return ops[0].value;
3010 /* Generate code to perform an operation specified by UNOPTAB
3011 on operand OP0, with result having machine-mode MODE.
3013 UNSIGNEDP is for the case where we have to widen the operands
3014 to perform the operation. It says to use zero-extension.
3016 If TARGET is nonzero, the value
3017 is generated there, if it is convenient to do so.
3018 In all cases an rtx is returned for the locus of the value;
3019 this may or may not be TARGET. */
3022 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3025 enum mode_class mclass
= GET_MODE_CLASS (mode
);
3026 enum machine_mode wider_mode
;
3030 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3034 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3036 /* Widening (or narrowing) clz needs special treatment. */
3037 if (unoptab
== clz_optab
)
3039 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3043 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3044 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3046 temp
= expand_doubleword_clz (mode
, op0
, target
);
3054 if (unoptab
== clrsb_optab
)
3056 temp
= widen_leading (mode
, op0
, target
, unoptab
);
3062 /* Widening (or narrowing) bswap needs special treatment. */
3063 if (unoptab
== bswap_optab
)
3065 temp
= widen_bswap (mode
, op0
, target
);
3069 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
3070 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3072 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3080 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3081 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3082 wider_mode
!= VOIDmode
;
3083 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3085 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
3088 rtx last
= get_last_insn ();
3090 /* For certain operations, we need not actually extend
3091 the narrow operand, as long as we will truncate the
3092 results to the same narrowness. */
3094 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3095 (unoptab
== neg_optab
3096 || unoptab
== one_cmpl_optab
)
3097 && mclass
== MODE_INT
);
3099 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3104 if (mclass
!= MODE_INT
3105 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
3108 target
= gen_reg_rtx (mode
);
3109 convert_move (target
, temp
, 0);
3113 return gen_lowpart (mode
, temp
);
3116 delete_insns_since (last
);
3120 /* These can be done a word at a time. */
3121 if (unoptab
== one_cmpl_optab
3122 && mclass
== MODE_INT
3123 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
3124 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3129 if (target
== 0 || target
== op0
|| !valid_multiword_target_p (target
))
3130 target
= gen_reg_rtx (mode
);
3134 /* Do the actual arithmetic. */
3135 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
3137 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
3138 rtx x
= expand_unop (word_mode
, unoptab
,
3139 operand_subword_force (op0
, i
, mode
),
3140 target_piece
, unsignedp
);
3142 if (target_piece
!= x
)
3143 emit_move_insn (target_piece
, x
);
3146 insns
= get_insns ();
3153 if (unoptab
->code
== NEG
)
3155 /* Try negating floating point values by flipping the sign bit. */
3156 if (SCALAR_FLOAT_MODE_P (mode
))
3158 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
3163 /* If there is no negation pattern, and we have no negative zero,
3164 try subtracting from zero. */
3165 if (!HONOR_SIGNED_ZEROS (mode
))
3167 temp
= expand_binop (mode
, (unoptab
== negv_optab
3168 ? subv_optab
: sub_optab
),
3169 CONST0_RTX (mode
), op0
, target
,
3170 unsignedp
, OPTAB_DIRECT
);
3176 /* Try calculating parity (x) as popcount (x) % 2. */
3177 if (unoptab
== parity_optab
)
3179 temp
= expand_parity (mode
, op0
, target
);
3184 /* Try implementing ffs (x) in terms of clz (x). */
3185 if (unoptab
== ffs_optab
)
3187 temp
= expand_ffs (mode
, op0
, target
);
3192 /* Try implementing ctz (x) in terms of clz (x). */
3193 if (unoptab
== ctz_optab
)
3195 temp
= expand_ctz (mode
, op0
, target
);
3201 /* Now try a library call in this mode. */
3202 libfunc
= optab_libfunc (unoptab
, mode
);
3208 enum machine_mode outmode
= mode
;
3210 /* All of these functions return small values. Thus we choose to
3211 have them return something that isn't a double-word. */
3212 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3213 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
3214 || unoptab
== parity_optab
)
3216 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
3217 optab_libfunc (unoptab
, mode
)));
3221 /* Pass 1 for NO_QUEUE so we don't lose any increments
3222 if the libcall is cse'd or moved. */
3223 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3225 insns
= get_insns ();
3228 target
= gen_reg_rtx (outmode
);
3229 eq_value
= gen_rtx_fmt_e (unoptab
->code
, mode
, op0
);
3230 if (GET_MODE_SIZE (outmode
) < GET_MODE_SIZE (mode
))
3231 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
3232 else if (GET_MODE_SIZE (outmode
) > GET_MODE_SIZE (mode
))
3233 eq_value
= simplify_gen_unary (ZERO_EXTEND
, outmode
, eq_value
, mode
);
3234 emit_libcall_block (insns
, target
, value
, eq_value
);
3239 /* It can't be done in this mode. Can we do it in a wider mode? */
3241 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3243 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
3244 wider_mode
!= VOIDmode
;
3245 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3247 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
3248 || optab_libfunc (unoptab
, wider_mode
))
3251 rtx last
= get_last_insn ();
3253 /* For certain operations, we need not actually extend
3254 the narrow operand, as long as we will truncate the
3255 results to the same narrowness. */
3257 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3258 (unoptab
== neg_optab
3259 || unoptab
== one_cmpl_optab
)
3260 && mclass
== MODE_INT
);
3262 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3265 /* If we are generating clz using wider mode, adjust the
3266 result. Similarly for clrsb. */
3267 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3269 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
3270 GEN_INT (GET_MODE_PRECISION (wider_mode
)
3271 - GET_MODE_PRECISION (mode
)),
3272 target
, true, OPTAB_DIRECT
);
3276 if (mclass
!= MODE_INT
)
3279 target
= gen_reg_rtx (mode
);
3280 convert_move (target
, temp
, 0);
3284 return gen_lowpart (mode
, temp
);
3287 delete_insns_since (last
);
3292 /* One final attempt at implementing negation via subtraction,
3293 this time allowing widening of the operand. */
3294 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3297 temp
= expand_binop (mode
,
3298 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3299 CONST0_RTX (mode
), op0
,
3300 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3308 /* Emit code to compute the absolute value of OP0, with result to
3309 TARGET if convenient. (TARGET may be 0.) The return value says
3310 where the result actually is to be found.
3312 MODE is the mode of the operand; the mode of the result is
3313 different but can be deduced from MODE.
3318 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
3319 int result_unsignedp
)
3324 result_unsignedp
= 1;
3326 /* First try to do it with a special abs instruction. */
3327 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3332 /* For floating point modes, try clearing the sign bit. */
3333 if (SCALAR_FLOAT_MODE_P (mode
))
3335 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3340 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3341 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3342 && !HONOR_SIGNED_ZEROS (mode
))
3344 rtx last
= get_last_insn ();
3346 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
3348 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3354 delete_insns_since (last
);
3357 /* If this machine has expensive jumps, we can do integer absolute
3358 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3359 where W is the width of MODE. */
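/* Illustrative example (not from the original source), with W == 8 and
   x == -5 (0xFB):
       s = x >> 7   ==  -1   (0xFF, arithmetic shift)
       (x ^ s) - s  ==  (~x) + 1  ==  5
   while for non-negative x the shift yields 0 and the value is
   unchanged.  */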
3361 if (GET_MODE_CLASS (mode
) == MODE_INT
3362 && BRANCH_COST (optimize_insn_for_speed_p (),
3365 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3366 GET_MODE_PRECISION (mode
) - 1,
3369 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3372 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3373 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3383 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3384 int result_unsignedp
, int safe
)
3389 result_unsignedp
= 1;
3391 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3395 /* If that does not win, use conditional jump and negate. */
3397 /* It is safe to use the target if it is the same
3398 as the source if this is also a pseudo register */
3399 if (op0
== target
&& REG_P (op0
)
3400 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3403 op1
= gen_label_rtx ();
3404 if (target
== 0 || ! safe
3405 || GET_MODE (target
) != mode
3406 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3408 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3409 target
= gen_reg_rtx (mode
);
3411 emit_move_insn (target
, op0
);
3414 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3415 NULL_RTX
, NULL_RTX
, op1
, -1);
3417 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3420 emit_move_insn (target
, op0
);
3426 /* Emit code to compute the one's complement absolute value of OP0
3427 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3428 (TARGET may be NULL_RTX.) The return value says where the result
3429 actually is to be found.
3431 MODE is the mode of the operand; the mode of the result is
3432 different but can be deduced from MODE. */
3435 expand_one_cmpl_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
)
3439 /* Not applicable for floating point modes. */
3440 if (FLOAT_MODE_P (mode
))
3443 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3444 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3446 rtx last
= get_last_insn ();
3448 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3450 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3456 delete_insns_since (last
);
3459 /* If this machine has expensive jumps, we can do one's complement
3460 absolute value of X as (((signed) x >> (W-1)) ^ x). */
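/* Illustrative example (not from the original source), with W == 8 and
   x == -5:
       s = x >> 7  ==  -1,  and  x ^ s  ==  ~x  ==  4
   i.e. the one's complement "absolute value", while non-negative values
   pass through unchanged.  */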
3462 if (GET_MODE_CLASS (mode
) == MODE_INT
3463 && BRANCH_COST (optimize_insn_for_speed_p (),
3466 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3467 GET_MODE_PRECISION (mode
) - 1,
3470 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3480 /* A subroutine of expand_copysign, perform the copysign operation using the
3481 abs and neg primitives advertised to exist on the target. The assumption
3482 is that we have a split register file, and leaving op0 in fp registers,
3483 and not playing with subregs so much, will help the register allocator. */
3486 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3487 int bitpos
, bool op0_is_abs
)
3489 enum machine_mode imode
;
3490 enum insn_code icode
;
3496 /* Check if the back end provides an insn that handles signbit for the
3498 icode
= optab_handler (signbit_optab
, mode
);
3499 if (icode
!= CODE_FOR_nothing
)
3501 imode
= insn_data
[(int) icode
].operand
[0].mode
;
3502 sign
= gen_reg_rtx (imode
);
3503 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3509 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3511 imode
= int_mode_for_mode (mode
);
3512 if (imode
== BLKmode
)
3514 op1
= gen_lowpart (imode
, op1
);
3521 if (FLOAT_WORDS_BIG_ENDIAN
)
3522 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3524 word
= bitpos
/ BITS_PER_WORD
;
3525 bitpos
= bitpos
% BITS_PER_WORD
;
3526 op1
= operand_subword_force (op1
, word
, mode
);
3529 mask
= double_int_setbit (double_int_zero
, bitpos
);
3531 sign
= expand_binop (imode
, and_optab
, op1
,
3532 immed_double_int_const (mask
, imode
),
3533 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3538 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3545 if (target
== NULL_RTX
)
3546 target
= copy_to_reg (op0
);
3548 emit_move_insn (target
, op0
);
3551 label
= gen_label_rtx ();
3552 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3554 if (GET_CODE (op0
) == CONST_DOUBLE
)
3555 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3557 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3559 emit_move_insn (target
, op0
);
3567 /* A subroutine of expand_copysign, perform the entire copysign operation
3568 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3569 is true if op0 is known to have its sign bit clear. */
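/* Illustrative example (not from the original source): for IEEE single
   precision with mask == 0x80000000,
       copysign (x, y)  ==  (x & ~mask) | (y & mask)
   e.g. copysign (2.0f, -0.0f) keeps the magnitude bits of 2.0f and takes
   the sign bit of -0.0f, giving -2.0f.  */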
3572 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3573                      int bitpos, bool op0_is_abs)
3575   enum machine_mode imode;
3577   int word, nwords, i;
3580   if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3582       imode = int_mode_for_mode (mode);
3583       if (imode == BLKmode)
3592       if (FLOAT_WORDS_BIG_ENDIAN)
3593         word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3595         word = bitpos / BITS_PER_WORD;
3596       bitpos = bitpos % BITS_PER_WORD;
3597       nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3600   mask = double_int_setbit (double_int_zero, bitpos);
3605       || (nwords > 1 && !valid_multiword_target_p (target)))
3606     target = gen_reg_rtx (mode);
3612       for (i = 0; i < nwords; ++i)
3614           rtx targ_piece = operand_subword (target, i, 1, mode);
3615           rtx op0_piece = operand_subword_force (op0, i, mode);
3621                 = expand_binop (imode, and_optab, op0_piece,
3622                                 immed_double_int_const (double_int_not (mask),
3624                                 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3626               op1 = expand_binop (imode, and_optab,
3627                                   operand_subword_force (op1, i, mode),
3628                                   immed_double_int_const (mask, imode),
3629                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3631               temp = expand_binop (imode, ior_optab, op0_piece, op1,
3632                                    targ_piece, 1, OPTAB_LIB_WIDEN);
3633               if (temp != targ_piece)
3634                 emit_move_insn (targ_piece, temp);
3637             emit_move_insn (targ_piece, op0_piece);
3640       insns = get_insns ();
3647       op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3648                           immed_double_int_const (mask, imode),
3649                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
3651       op0 = gen_lowpart (imode, op0);
3653         op0 = expand_binop (imode, and_optab, op0,
3654                             immed_double_int_const (double_int_not (mask),
3656                             NULL_RTX, 1, OPTAB_LIB_WIDEN);
3658       temp = expand_binop (imode, ior_optab, op0, op1,
3659                            gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3660       target = lowpart_subreg_maybe_copy (mode, temp, imode);
3666 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3667 scalar floating point mode. Return NULL if we do not know how to
3668 expand the operation inline. */
3671 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3673 enum machine_mode mode
= GET_MODE (op0
);
3674 const struct real_format
*fmt
;
3678 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3679 gcc_assert (GET_MODE (op1
) == mode
);
3681 /* First try to do it with a special instruction. */
3682 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3683 target
, 0, OPTAB_DIRECT
);
3687 fmt
= REAL_MODE_FORMAT (mode
);
3688 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3692 if (GET_CODE (op0
) == CONST_DOUBLE
)
3694 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3695 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3699 if (fmt
->signbit_ro
>= 0
3700 && (GET_CODE (op0
) == CONST_DOUBLE
3701 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3702 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3704 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3705 fmt
->signbit_ro
, op0_is_abs
);
3710 if (fmt
->signbit_rw
< 0)
3712 return expand_copysign_bit (mode
, op0
, op1
, target
,
3713 fmt
->signbit_rw
, op0_is_abs
);
3716 /* Generate an instruction whose insn-code is INSN_CODE,
3717 with two operands: an output TARGET and an input OP0.
3718 TARGET *must* be nonzero, and the output is always stored there.
3719 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3720 the value that is stored into TARGET.
3722 Return false if expansion failed. */
3725 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3728 struct expand_operand ops
[2];
3731 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3732 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3733 pat
= maybe_gen_insn (icode
, 2, ops
);
3737 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3738 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
);
3742 if (ops
[0].value
!= target
)
3743 emit_move_insn (target
, ops
[0].value
);
3746 /* Generate an instruction whose insn-code is INSN_CODE,
3747 with two operands: an output TARGET and an input OP0.
3748 TARGET *must* be nonzero, and the output is always stored there.
3749 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3750 the value that is stored into TARGET. */
3753 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3755 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3759 struct no_conflict_data
3761 rtx target
, first
, insn
;
3765 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3766 the currently examined clobber / store has to stay in the list of
3767 insns that constitute the actual libcall block. */
3769 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3771 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3773 /* If this insn directly contributes to setting the target, it must stay.  */
3774 if (reg_overlap_mentioned_p (p
->target
, dest
))
3775 p
->must_stay
= true;
3776 /* If we haven't committed to keeping any other insns in the list yet,
3777 there is nothing more to check. */
3778 else if (p
->insn
== p
->first
)
3780 /* If this insn sets / clobbers a register that feeds one of the insns
3781 already in the list, this insn has to stay too. */
3782 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3783 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3784 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3785 /* Likewise if this insn depends on a register set by a previous
3786 insn in the list, or if it sets a result (presumably a hard
3787 register) that is set or clobbered by a previous insn.
3788 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3789 SET_DEST perform the former check on the address, and the latter
3790 check on the MEM. */
3791 || (GET_CODE (set
) == SET
3792 && (modified_in_p (SET_SRC (set
), p
->first
)
3793 || modified_in_p (SET_DEST (set
), p
->first
)
3794 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3795 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3796 p
->must_stay
= true;
3800 /* Emit code to make a call to a constant function or a library call.
3802 INSNS is a list containing all insns emitted in the call.
3803 These insns leave the result in RESULT.  Our job is to copy RESULT
3804 to TARGET, which is logically equivalent to EQUIV.
3806 We first emit any insns that set a pseudo on the assumption that these are
3807 loading constants into registers; doing so allows them to be safely cse'ed
3808 between blocks.  Then we emit all the other insns in the block, followed by
3809 an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
3810 note with an operand of EQUIV. */
3813 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3815 rtx final_dest
= target
;
3816 rtx next
, last
, insn
;
3818 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3819 into a MEM later. Protect the libcall block from this change. */
3820 if (! REG_P (target
) || REG_USERVAR_P (target
))
3821 target
= gen_reg_rtx (GET_MODE (target
));
3823 /* If we're using non-call exceptions, a libcall corresponding to an
3824 operation that may trap may also trap. */
3825 /* ??? See the comment in front of make_reg_eh_region_note. */
3826 if (cfun
->can_throw_non_call_exceptions
&& may_trap_p (equiv
))
3828 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3831 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3834 int lp_nr
= INTVAL (XEXP (note
, 0));
3835 if (lp_nr
== 0 || lp_nr
== INT_MIN
)
3836 remove_note (insn
, note
);
3842 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3843 reg note to indicate that this call cannot throw or execute a nonlocal
3844 goto (unless there is already a REG_EH_REGION note, in which case
3846 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3848 make_reg_eh_region_note_nothrow_nononlocal (insn
);
3851 /* First emit all insns that set pseudos. Remove them from the list as
3852 we go. Avoid insns that set pseudos which were referenced in previous
3853 insns. These can be generated by move_by_pieces, for example,
3854 to update an address. Similarly, avoid insns that reference things
3855 set in previous insns. */
3857 for (insn
= insns
; insn
; insn
= next
)
3859 rtx set
= single_set (insn
);
3861 next
= NEXT_INSN (insn
);
3863 if (set
!= 0 && REG_P (SET_DEST (set
))
3864 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3866 struct no_conflict_data data
;
3868 data
.target
= const0_rtx
;
3872 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3873 if (! data
.must_stay
)
3875 if (PREV_INSN (insn
))
3876 NEXT_INSN (PREV_INSN (insn
)) = next
;
3881 PREV_INSN (next
) = PREV_INSN (insn
);
3887 /* Some ports use a loop to copy large arguments onto the stack.
3888 Don't move anything outside such a loop. */
3893 /* Write the remaining insns followed by the final copy. */
3894 for (insn
= insns
; insn
; insn
= next
)
3896 next
= NEXT_INSN (insn
);
3901 last
= emit_move_insn (target
, result
);
3902 if (optab_handler (mov_optab
, GET_MODE (target
)) != CODE_FOR_nothing
)
3903 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3905 if (final_dest
!= target
)
3906 emit_move_insn (final_dest
, target
);
3909 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3910 PURPOSE describes how this comparison will be used. CODE is the rtx
3911 comparison code we will be using.
3913 ??? Actually, CODE is slightly weaker than that. A target is still
3914 required to implement all of the normal bcc operations, but not
3915 required to implement all (or any) of the unordered bcc operations. */
3918 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3919 enum can_compare_purpose purpose
)
3922 test
= gen_rtx_fmt_ee (code
, mode
, const0_rtx
, const0_rtx
);
3925 enum insn_code icode
;
3927 if (purpose
== ccp_jump
3928 && (icode
= optab_handler (cbranch_optab
, mode
)) != CODE_FOR_nothing
3929 && insn_operand_matches (icode
, 0, test
))
3931 if (purpose
== ccp_store_flag
3932 && (icode
= optab_handler (cstore_optab
, mode
)) != CODE_FOR_nothing
3933 && insn_operand_matches (icode
, 1, test
))
3935 if (purpose
== ccp_cmov
3936 && optab_handler (cmov_optab
, mode
) != CODE_FOR_nothing
)
3939 mode
= GET_MODE_WIDER_MODE (mode
);
3940 PUT_MODE (test
, mode
);
3942 while (mode
!= VOIDmode
);
3947 /* This function is called when we are going to emit a compare instruction that
3948 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3950 *PMODE is the mode of the inputs (in case they are const_int).
3951 *PUNSIGNEDP nonzero says that the operands are unsigned;
3952 this matters if they need to be widened (as given by METHODS).
3954 If they have mode BLKmode, then SIZE specifies the size of both operands.
3956 This function performs all the setup necessary so that the caller only has
3957 to emit a single comparison insn. This setup can involve doing a BLKmode
3958 comparison or emitting a library call to perform the comparison if no insn
3959 is available to handle it.
3960 The values which are passed in through pointers can be modified; the caller
3961 should perform the comparison on the modified values. Constant
3962 comparisons must have already been folded. */
3965 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3966 int unsignedp
, enum optab_methods methods
,
3967 rtx
*ptest
, enum machine_mode
*pmode
)
3969 enum machine_mode mode
= *pmode
;
3971 enum machine_mode cmp_mode
;
3972 enum mode_class mclass
;
3974 /* The other methods are not needed. */
3975 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
3976 || methods
== OPTAB_LIB_WIDEN
);
3978 /* If we are optimizing, force expensive constants into a register. */
3979 if (CONSTANT_P (x
) && optimize
3980 && (rtx_cost (x
, COMPARE
, 0, optimize_insn_for_speed_p ())
3981 > COSTS_N_INSNS (1)))
3982 x
= force_reg (mode
, x
);
3984 if (CONSTANT_P (y
) && optimize
3985 && (rtx_cost (y
, COMPARE
, 1, optimize_insn_for_speed_p ())
3986 > COSTS_N_INSNS (1)))
3987 y
= force_reg (mode
, y
);
3990 /* Make sure we have a canonical comparison.  The RTL
3991 documentation states that canonical comparisons are required only
3992 for targets which have cc0.  */
3993 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3996 /* Don't let both operands fail to indicate the mode. */
3997 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3998 x
= force_reg (mode
, x
);
3999 if (mode
== VOIDmode
)
4000 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
4002 /* Handle all BLKmode compares. */
4004 if (mode
== BLKmode
)
4006 enum machine_mode result_mode
;
4007 enum insn_code cmp_code
;
4012 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
4016 /* Try to use a memory block compare insn - either cmpstr
4017 or cmpmem will do. */
4018 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
4019 cmp_mode
!= VOIDmode
;
4020 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
4022 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
4023 if (cmp_code
== CODE_FOR_nothing
)
4024 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
4025 if (cmp_code
== CODE_FOR_nothing
)
4026 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
4027 if (cmp_code
== CODE_FOR_nothing
)
4030 /* Must make sure the size fits the insn's mode. */
4031 if ((CONST_INT_P (size
)
4032 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
4033 || (GET_MODE_BITSIZE (GET_MODE (size
))
4034 > GET_MODE_BITSIZE (cmp_mode
)))
4037 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
4038 result
= gen_reg_rtx (result_mode
);
4039 size
= convert_to_mode (cmp_mode
, size
, 1);
4040 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
4042 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4043 *pmode
= result_mode
;
4047 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
4050 /* Otherwise call a library function, memcmp. */
4051 libfunc
= memcmp_libfunc
;
4052 length_type
= sizetype
;
4053 result_mode
= TYPE_MODE (integer_type_node
);
4054 cmp_mode
= TYPE_MODE (length_type
);
4055 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
4056 TYPE_UNSIGNED (length_type
));
4058 result
= emit_library_call_value (libfunc
, 0, LCT_PURE
,
4064 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4065 *pmode
= result_mode
;
4069 /* Don't allow operands to the compare to trap, as that can put the
4070 compare and branch in different basic blocks. */
4071 if (cfun
->can_throw_non_call_exceptions
)
4074 x
= force_reg (mode
, x
);
4076 y
= force_reg (mode
, y
);
4079 if (GET_MODE_CLASS (mode
) == MODE_CC
)
4081 gcc_assert (can_compare_p (comparison
, CCmode
, ccp_jump
));
4082 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4086 mclass
= GET_MODE_CLASS (mode
);
4087 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4091 enum insn_code icode
;
4092 icode
= optab_handler (cbranch_optab
, cmp_mode
);
4093 if (icode
!= CODE_FOR_nothing
4094 && insn_operand_matches (icode
, 0, test
))
4096 rtx last
= get_last_insn ();
4097 rtx op0
= prepare_operand (icode
, x
, 1, mode
, cmp_mode
, unsignedp
);
4098 rtx op1
= prepare_operand (icode
, y
, 2, mode
, cmp_mode
, unsignedp
);
4100 && insn_operand_matches (icode
, 1, op0
)
4101 && insn_operand_matches (icode
, 2, op1
))
4103 XEXP (test
, 0) = op0
;
4104 XEXP (test
, 1) = op1
;
4109 delete_insns_since (last
);
4112 if (methods
== OPTAB_DIRECT
|| !CLASS_HAS_WIDER_MODES_P (mclass
))
4114 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
);
4116 while (cmp_mode
!= VOIDmode
);
4118 if (methods
!= OPTAB_LIB_WIDEN
)
4121 if (!SCALAR_FLOAT_MODE_P (mode
))
4125 /* Handle a libcall just for the mode we are using. */
4126 libfunc
= optab_libfunc (cmp_optab
, mode
);
4127 gcc_assert (libfunc
);
4129 /* If we want unsigned, and this mode has a distinct unsigned
4130 comparison routine, use that. */
4133 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
4138 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4139 targetm
.libgcc_cmp_return_mode (),
4140 2, x
, mode
, y
, mode
);
4142 /* There are two kinds of comparison routines. Biased routines
4143 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4144 of gcc expect that the comparison operation is equivalent
4145 to the modified comparison. For signed comparisons compare the
4146 result against 1 in the biased case, and zero in the unbiased
4147 case. For unsigned comparisons always compare against 1 after
4148 biasing the unbiased result by adding 1. This gives us a way to
4150 The comparisons in the fixed-point helper library are always
4155 if (!TARGET_LIB_INT_CMP_BIASED
&& !ALL_FIXED_POINT_MODE_P (mode
))
4158 x
= plus_constant (result
, 1);
4164 prepare_cmp_insn (x
, y
, comparison
, NULL_RTX
, unsignedp
, methods
,
4168 prepare_float_lib_cmp (x
, y
, comparison
, ptest
, pmode
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      if (reload_completed)
	return NULL_RTX;
      else
	x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
{
  enum machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label);
}
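/* Illustrative example (added; not part of the original source): emitting
   "if (a < b) goto dest" for two signed SImode pseudos A and B, with DEST
   a code_label rtx.  A, B and DEST are hypothetical; SIZE is NULL_RTX
   because the operands are not BLKmode.  */
#if 0
  emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 0, dest);
#endif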
4264 /* Emit a library call comparison between floating point X and Y.
4265 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4268 prepare_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
,
4269 rtx
*ptest
, enum machine_mode
*pmode
)
4271 enum rtx_code swapped
= swap_condition (comparison
);
4272 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4273 enum machine_mode orig_mode
= GET_MODE (x
);
4274 enum machine_mode mode
, cmp_mode
;
4275 rtx true_rtx
, false_rtx
;
4276 rtx value
, target
, insns
, equiv
;
4278 bool reversed_p
= false;
4279 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4281 for (mode
= orig_mode
;
4283 mode
= GET_MODE_WIDER_MODE (mode
))
4285 if (code_to_optab
[comparison
]
4286 && (libfunc
= optab_libfunc (code_to_optab
[comparison
], mode
)))
4289 if (code_to_optab
[swapped
]
4290 && (libfunc
= optab_libfunc (code_to_optab
[swapped
], mode
)))
4293 tmp
= x
; x
= y
; y
= tmp
;
4294 comparison
= swapped
;
4298 if (code_to_optab
[reversed
]
4299 && (libfunc
= optab_libfunc (code_to_optab
[reversed
], mode
)))
4301 comparison
= reversed
;
4307 gcc_assert (mode
!= VOIDmode
);
4309 if (mode
!= orig_mode
)
4311 x
= convert_to_mode (mode
, x
, 0);
4312 y
= convert_to_mode (mode
, y
, 0);
  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
4318 if (comparison
== UNORDERED
4319 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4321 true_rtx
= const_true_rtx
;
4322 false_rtx
= const0_rtx
;
4329 true_rtx
= const0_rtx
;
4330 false_rtx
= const_true_rtx
;
4334 true_rtx
= const_true_rtx
;
4335 false_rtx
= const0_rtx
;
4339 true_rtx
= const1_rtx
;
4340 false_rtx
= const0_rtx
;
4344 true_rtx
= const0_rtx
;
4345 false_rtx
= constm1_rtx
;
4349 true_rtx
= constm1_rtx
;
4350 false_rtx
= const0_rtx
;
4354 true_rtx
= const0_rtx
;
4355 false_rtx
= const1_rtx
;
4363 if (comparison
== UNORDERED
)
4365 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4366 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4367 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4368 temp
, const_true_rtx
, equiv
);
4372 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4373 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4374 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4375 equiv
, true_rtx
, false_rtx
);
4379 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4380 cmp_mode
, 2, x
, mode
, y
, mode
);
4381 insns
= get_insns ();
4384 target
= gen_reg_rtx (cmp_mode
);
4385 emit_libcall_block (insns
, target
, value
, equiv
);
4387 if (comparison
== UNORDERED
4388 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
)
4390 *ptest
= gen_rtx_fmt_ee (reversed_p
? EQ
: NE
, VOIDmode
, target
, false_rtx
);
4392 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, target
, const0_rtx
);
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  struct expand_operand ops[1];

  create_address_operand (&ops[0], loc);
  expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
}
4409 #ifdef HAVE_conditional_move
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
4426 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4427 enum machine_mode cmode
, rtx op2
, rtx op3
,
4428 enum machine_mode mode
, int unsignedp
)
4430 rtx tem
, comparison
, last
;
4431 enum insn_code icode
;
4432 enum rtx_code reversed
;
4434 /* If one operand is constant, make it the second one. Only do this
4435 if the other operand is not constant as well. */
4437 if (swap_commutative_operands_p (op0
, op1
))
4442 code
= swap_condition (code
);
4445 /* get_condition will prefer to generate LT and GT even if the old
4446 comparison was against zero, so undo that canonicalization here since
4447 comparisons against zero are cheaper. */
4448 if (code
== LT
&& op1
== const1_rtx
)
4449 code
= LE
, op1
= const0_rtx
;
4450 else if (code
== GT
&& op1
== constm1_rtx
)
4451 code
= GE
, op1
= const0_rtx
;
4453 if (cmode
== VOIDmode
)
4454 cmode
= GET_MODE (op0
);
4456 if (swap_commutative_operands_p (op2
, op3
)
4457 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4466 if (mode
== VOIDmode
)
4467 mode
= GET_MODE (op2
);
4469 icode
= direct_optab_handler (movcc_optab
, mode
);
4471 if (icode
== CODE_FOR_nothing
)
4475 target
= gen_reg_rtx (mode
);
4477 code
= unsignedp
? unsigned_condition (code
) : code
;
4478 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
4483 if (!COMPARISON_P (comparison
))
4486 do_pending_stack_adjust ();
4487 last
= get_last_insn ();
4488 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4489 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4490 &comparison
, &cmode
);
4493 struct expand_operand ops
[4];
4495 create_output_operand (&ops
[0], target
, mode
);
4496 create_fixed_operand (&ops
[1], comparison
);
4497 create_input_operand (&ops
[2], op2
, mode
);
4498 create_input_operand (&ops
[3], op3
, mode
);
4499 if (maybe_expand_insn (icode
, 4, ops
))
4501 if (ops
[0].value
!= target
)
4502 convert_move (target
, ops
[0].value
, false);
4506 delete_insns_since (last
);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */
4519 can_conditionally_move_p (enum machine_mode mode
)
4521 if (direct_optab_handler (movcc_optab
, mode
) != CODE_FOR_nothing
)
4527 #endif /* HAVE_conditional_move */
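/* Illustrative example (added; not part of the original source): trying to
   expand R = (A < B) ? C : D with a conditional move, falling back when the
   target lacks a usable movcc pattern.  A, B, C, D are hypothetical SImode
   pseudos; passing NULL_RTX as TARGET lets the function pick a register.  */
#if 0
  if (can_conditionally_move_p (SImode))
    {
      rtx r = emit_conditional_move (NULL_RTX, LT, a, b, SImode,
				     c, d, SImode, 0);
      if (r == NULL_RTX)
	{
	  /* Fall back to a compare-and-branch sequence.  */
	}
    }
#endif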
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
4544 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4545 enum machine_mode cmode
, rtx op2
, rtx op3
,
4546 enum machine_mode mode
, int unsignedp
)
4548 rtx tem
, comparison
, last
;
4549 enum insn_code icode
;
4550 enum rtx_code reversed
;
4552 /* If one operand is constant, make it the second one. Only do this
4553 if the other operand is not constant as well. */
4555 if (swap_commutative_operands_p (op0
, op1
))
4560 code
= swap_condition (code
);
4563 /* get_condition will prefer to generate LT and GT even if the old
4564 comparison was against zero, so undo that canonicalization here since
4565 comparisons against zero are cheaper. */
4566 if (code
== LT
&& op1
== const1_rtx
)
4567 code
= LE
, op1
= const0_rtx
;
4568 else if (code
== GT
&& op1
== constm1_rtx
)
4569 code
= GE
, op1
= const0_rtx
;
4571 if (cmode
== VOIDmode
)
4572 cmode
= GET_MODE (op0
);
4574 if (swap_commutative_operands_p (op2
, op3
)
4575 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4584 if (mode
== VOIDmode
)
4585 mode
= GET_MODE (op2
);
4587 icode
= optab_handler (addcc_optab
, mode
);
4589 if (icode
== CODE_FOR_nothing
)
4593 target
= gen_reg_rtx (mode
);
4595 code
= unsignedp
? unsigned_condition (code
) : code
;
4596 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
4601 if (!COMPARISON_P (comparison
))
4604 do_pending_stack_adjust ();
4605 last
= get_last_insn ();
4606 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4607 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4608 &comparison
, &cmode
);
4611 struct expand_operand ops
[4];
4613 create_output_operand (&ops
[0], target
, mode
);
4614 create_fixed_operand (&ops
[1], comparison
);
4615 create_input_operand (&ops
[2], op2
, mode
);
4616 create_input_operand (&ops
[3], op3
, mode
);
4617 if (maybe_expand_insn (icode
, 4, ops
))
4619 if (ops
[0].value
!= target
)
4620 convert_move (target
, ops
[0].value
, false);
4624 delete_insns_since (last
);
4628 /* These functions attempt to generate an insn body, rather than
4629 emitting the insn, but if the gen function already emits them, we
4630 make no attempt to turn them back into naked patterns. */
/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
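/* Illustrative example (added; not part of the original source): emit
   "X += Y" when the backend's add pattern accepts the operands directly.
   X and Y are hypothetical pseudo registers of the same mode.  */
#if 0
  if (have_add2_insn (x, y))
    emit_insn (gen_add2_insn (x, y));
#endif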
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
4734 /* Generate the body of an instruction to copy Y into X.
4735 It may be a list of insns, if one insn isn't enough. */
4738 gen_move_insn (rtx x
, rtx y
)
4743 emit_move_insn_1 (x
, y
);
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode);
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
4778 /* can_fix_p and can_float_p say whether the target machine
4779 can directly convert a given fixed point type to
4780 a given floating point type, or vice versa.
4781 The returned value is the CODE_FOR_... value to use,
4782 or CODE_FOR_nothing if these modes cannot be directly converted.
4784 *TRUNCP_PTR is set to 1 if it is necessary to output
4785 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4787 static enum insn_code
4788 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4789 int unsignedp
, int *truncp_ptr
)
4792 enum insn_code icode
;
4794 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4795 icode
= convert_optab_handler (tab
, fixmode
, fltmode
);
4796 if (icode
!= CODE_FOR_nothing
)
4802 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4803 for this to work. We need to rework the fix* and ftrunc* patterns
4804 and documentation. */
4805 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4806 icode
= convert_optab_handler (tab
, fixmode
, fltmode
);
4807 if (icode
!= CODE_FOR_nothing
4808 && optab_handler (ftrunc_optab
, fltmode
) != CODE_FOR_nothing
)
4815 return CODE_FOR_nothing
;
4819 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4824 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4825 return convert_optab_handler (tab
, fltmode
, fixmode
);
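/* Illustrative example (added; not part of the original source): ask
   whether SFmode can be fixed to signed SImode without a libcall, and
   whether an explicit ftrunc insn must be emitted first.  */
#if 0
  int must_trunc = 0;
  if (can_fix_p (SImode, SFmode, 0, &must_trunc) != CODE_FOR_nothing)
    {
      /* An open-coded conversion is available; MUST_TRUNC says whether
	 an ftrunc insn is needed before the fix insn.  */
    }
#endif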
/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 is the code of the vector operation to be used when
   vectorizing the operation, if available.
   - DECL is the decl of the target builtin function to be used
   when vectorizing the operation, if available.  In this case,
   CODE1 is CALL_EXPR.  */
4848 supportable_convert_operation (enum tree_code code
,
4849 tree vectype_out
, tree vectype_in
,
4850 tree
*decl
, enum tree_code
*code1
)
4852 enum machine_mode m1
,m2
;
4855 m1
= TYPE_MODE (vectype_out
);
4856 m2
= TYPE_MODE (vectype_in
);
  /* First check if we can do the conversion directly.  */
4859 if ((code
== FIX_TRUNC_EXPR
4860 && can_fix_p (m1
,m2
,TYPE_UNSIGNED (vectype_out
), &truncp
)
4861 != CODE_FOR_nothing
)
4862 || (code
== FLOAT_EXPR
4863 && can_float_p (m1
,m2
,TYPE_UNSIGNED (vectype_in
))
4864 != CODE_FOR_nothing
))
4870 /* Now check for builtin. */
4871 if (targetm
.vectorize
.builtin_conversion
4872 && targetm
.vectorize
.builtin_conversion (code
, vectype_out
, vectype_in
))
4875 *decl
= targetm
.vectorize
.builtin_conversion (code
, vectype_out
, vectype_in
);
4882 /* Generate code to convert FROM to floating point
4883 and store in TO. FROM must be fixed point and not VOIDmode.
4884 UNSIGNEDP nonzero means regard FROM as unsigned.
4885 Normally this is done by correcting the final value
4886 if it is negative. */
4889 expand_float (rtx to
, rtx from
, int unsignedp
)
4891 enum insn_code icode
;
4893 enum machine_mode fmode
, imode
;
4894 bool can_do_signed
= false;
4896 /* Crash now, because we won't be able to decide which mode to use. */
4897 gcc_assert (GET_MODE (from
) != VOIDmode
);
4899 /* Look for an insn to do the conversion. Do it in the specified
4900 modes if possible; otherwise convert either input, output or both to
4901 wider mode. If the integer mode is wider than the mode of FROM,
4902 we can do the conversion signed even if the input is unsigned. */
4904 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4905 fmode
= GET_MODE_WIDER_MODE (fmode
))
4906 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4907 imode
= GET_MODE_WIDER_MODE (imode
))
4909 int doing_unsigned
= unsignedp
;
4911 if (fmode
!= GET_MODE (to
)
4912 && significand_size (fmode
) < GET_MODE_PRECISION (GET_MODE (from
)))
4915 icode
= can_float_p (fmode
, imode
, unsignedp
);
4916 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4918 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4919 if (scode
!= CODE_FOR_nothing
)
4920 can_do_signed
= true;
4921 if (imode
!= GET_MODE (from
))
4922 icode
= scode
, doing_unsigned
= 0;
4925 if (icode
!= CODE_FOR_nothing
)
4927 if (imode
!= GET_MODE (from
))
4928 from
= convert_to_mode (imode
, from
, unsignedp
);
4930 if (fmode
!= GET_MODE (to
))
4931 target
= gen_reg_rtx (fmode
);
4933 emit_unop_insn (icode
, target
, from
,
4934 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4937 convert_move (to
, target
, 0);
4942 /* Unsigned integer, and no way to convert directly. Convert as signed,
4943 then unconditionally adjust the result. */
4944 if (unsignedp
&& can_do_signed
)
4946 rtx label
= gen_label_rtx ();
4948 REAL_VALUE_TYPE offset
;
4950 /* Look for a usable floating mode FMODE wider than the source and at
4951 least as wide as the target. Using FMODE will avoid rounding woes
4952 with unsigned values greater than the signed maximum value. */
4954 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4955 fmode
= GET_MODE_WIDER_MODE (fmode
))
4956 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4957 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4960 if (fmode
== VOIDmode
)
4962 /* There is no such mode. Pretend the target is wide enough. */
4963 fmode
= GET_MODE (to
);
4965 /* Avoid double-rounding when TO is narrower than FROM. */
4966 if ((significand_size (fmode
) + 1)
4967 < GET_MODE_PRECISION (GET_MODE (from
)))
4970 rtx neglabel
= gen_label_rtx ();
4972 /* Don't use TARGET if it isn't a register, is a hard register,
4973 or is the wrong mode. */
4975 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4976 || GET_MODE (target
) != fmode
)
4977 target
= gen_reg_rtx (fmode
);
4979 imode
= GET_MODE (from
);
4980 do_pending_stack_adjust ();
4982 /* Test whether the sign bit is set. */
4983 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4986 /* The sign bit is not set. Convert as signed. */
4987 expand_float (target
, from
, 0);
4988 emit_jump_insn (gen_jump (label
));
4991 /* The sign bit is set.
4992 Convert to a usable (positive signed) value by shifting right
4993 one bit, while remembering if a nonzero bit was shifted
4994 out; i.e., compute (from & 1) | (from >> 1). */
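      /* Clarifying note (added; not part of the original source): OR-ing
	 the shifted-out low bit back into bit 0 acts as a sticky bit, so
	 converting the halved value rounds the same way converting the
	 full value would; the later doubling (adding TARGET to itself)
	 restores the magnitude.  */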
4996 emit_label (neglabel
);
4997 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4998 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4999 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
5000 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5002 expand_float (target
, temp
, 0);
5004 /* Multiply by 2 to undo the shift above. */
5005 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5006 target
, 0, OPTAB_LIB_WIDEN
);
5008 emit_move_insn (target
, temp
);
5010 do_pending_stack_adjust ();
5016 /* If we are about to do some arithmetic to correct for an
5017 unsigned operand, do it in a pseudo-register. */
5019 if (GET_MODE (to
) != fmode
5020 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5021 target
= gen_reg_rtx (fmode
);
5023 /* Convert as signed integer to floating. */
5024 expand_float (target
, from
, 0);
5026 /* If FROM is negative (and therefore TO is negative),
5027 correct its value by 2**bitwidth. */
5029 do_pending_stack_adjust ();
5030 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
5034 real_2expN (&offset
, GET_MODE_PRECISION (GET_MODE (from
)), fmode
);
5035 temp
= expand_binop (fmode
, add_optab
, target
,
5036 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
5037 target
, 0, OPTAB_LIB_WIDEN
);
5039 emit_move_insn (target
, temp
);
5041 do_pending_stack_adjust ();
5046 /* No hardware instruction available; call a library routine. */
5051 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5053 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
5054 from
= convert_to_mode (SImode
, from
, unsignedp
);
5056 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5057 gcc_assert (libfunc
);
5061 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5062 GET_MODE (to
), 1, from
,
5064 insns
= get_insns ();
5067 emit_libcall_block (insns
, target
, value
,
5068 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
5069 GET_MODE (to
), from
));
5074 /* Copy result to requested destination
5075 if we have been computing in a temp location. */
5079 if (GET_MODE (target
) == GET_MODE (to
))
5080 emit_move_insn (to
, target
);
5082 convert_move (to
, target
, 0);
5086 /* Generate code to convert FROM to fixed point and store in TO. FROM
5087 must be floating point. */
5090 expand_fix (rtx to
, rtx from
, int unsignedp
)
5092 enum insn_code icode
;
5094 enum machine_mode fmode
, imode
;
5097 /* We first try to find a pair of modes, one real and one integer, at
5098 least as wide as FROM and TO, respectively, in which we can open-code
5099 this conversion. If the integer mode is wider than the mode of TO,
5100 we can do the conversion either signed or unsigned. */
5102 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5103 fmode
= GET_MODE_WIDER_MODE (fmode
))
5104 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5105 imode
= GET_MODE_WIDER_MODE (imode
))
5107 int doing_unsigned
= unsignedp
;
5109 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5110 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5111 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5113 if (icode
!= CODE_FOR_nothing
)
5115 rtx last
= get_last_insn ();
5116 if (fmode
!= GET_MODE (from
))
5117 from
= convert_to_mode (fmode
, from
, 0);
5121 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5122 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5126 if (imode
!= GET_MODE (to
))
5127 target
= gen_reg_rtx (imode
);
5129 if (maybe_emit_unop_insn (icode
, target
, from
,
5130 doing_unsigned
? UNSIGNED_FIX
: FIX
))
5133 convert_move (to
, target
, unsignedp
);
5136 delete_insns_since (last
);
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for other inputs overflow happens and the result is undefined).
     So we know that the most significant bit set in the mantissa corresponds
     to 2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */
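  /* Worked example (added; not part of the original source): converting a
     DFmode value V in [2^63, 2^64) to a 64-bit unsigned integer using only
     a signed fix.  LIMIT is 2^63 in DFmode; V - LIMIT fits in the signed
     range, the signed fix converts it, and XOR-ing the result with
     (HOST_WIDE_INT) 1 << 63 adds 2^63 back, since that bit is known to be
     clear after the subtraction.  */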
5164 if (unsignedp
&& GET_MODE_PRECISION (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5165 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5166 fmode
= GET_MODE_WIDER_MODE (fmode
))
5167 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
5168 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5169 || GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (GET_MODE (to
))))
5172 REAL_VALUE_TYPE offset
;
5173 rtx limit
, lab1
, lab2
, insn
;
5175 bitsize
= GET_MODE_PRECISION (GET_MODE (to
));
5176 real_2expN (&offset
, bitsize
- 1, fmode
);
5177 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5178 lab1
= gen_label_rtx ();
5179 lab2
= gen_label_rtx ();
5181 if (fmode
!= GET_MODE (from
))
5182 from
= convert_to_mode (fmode
, from
, 0);
5184 /* See if we need to do the subtraction. */
5185 do_pending_stack_adjust ();
5186 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5189 /* If not, do the signed "fix" and branch around fixup code. */
5190 expand_fix (to
, from
, 0);
5191 emit_jump_insn (gen_jump (lab2
));
5194 /* Otherwise, subtract 2**(N-1), convert to signed number,
5195 then add 2**(N-1). Do the addition using XOR since this
5196 will often generate better code. */
5198 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5199 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5200 expand_fix (to
, target
, 0);
5201 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5203 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5205 to
, 1, OPTAB_LIB_WIDEN
);
5208 emit_move_insn (to
, target
);
5212 if (optab_handler (mov_optab
, GET_MODE (to
)) != CODE_FOR_nothing
)
5214 /* Make a place for a REG_NOTE and add it. */
5215 insn
= emit_move_insn (to
, to
);
5216 set_unique_reg_note (insn
,
5218 gen_rtx_fmt_e (UNSIGNED_FIX
,
5226 /* We can't do it with an insn, so use a library call. But first ensure
5227 that the mode of TO is at least as wide as SImode, since those are the
5228 only library calls we know about. */
5230 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5232 target
= gen_reg_rtx (SImode
);
5234 expand_fix (target
, from
, unsignedp
);
5242 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5243 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5244 gcc_assert (libfunc
);
5248 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5249 GET_MODE (to
), 1, from
,
5251 insns
= get_insns ();
5254 emit_libcall_block (insns
, target
, value
,
5255 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5256 GET_MODE (to
), from
));
5261 if (GET_MODE (to
) == GET_MODE (target
))
5262 emit_move_insn (to
, target
);
5264 convert_move (to
, target
, 0);
5268 /* Generate code to convert FROM or TO a fixed-point.
5269 If UINTP is true, either TO or FROM is an unsigned integer.
5270 If SATP is true, we need to saturate the result. */
5273 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5275 enum machine_mode to_mode
= GET_MODE (to
);
5276 enum machine_mode from_mode
= GET_MODE (from
);
5278 enum rtx_code this_code
;
5279 enum insn_code code
;
5283 if (to_mode
== from_mode
)
5285 emit_move_insn (to
, from
);
5291 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5292 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5296 tab
= satp
? satfract_optab
: fract_optab
;
5297 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5299 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
5300 if (code
!= CODE_FOR_nothing
)
5302 emit_unop_insn (code
, to
, from
, this_code
);
5306 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5307 gcc_assert (libfunc
);
5310 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5311 1, from
, from_mode
);
5312 insns
= get_insns ();
5315 emit_libcall_block (insns
, to
, value
,
5316 gen_rtx_fmt_e (tab
->code
, to_mode
, from
));
5319 /* Generate code to convert FROM to fixed point and store in TO. FROM
5320 must be floating point, TO must be signed. Use the conversion optab
5321 TAB to do the conversion. */
5324 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5326 enum insn_code icode
;
5328 enum machine_mode fmode
, imode
;
5330 /* We first try to find a pair of modes, one real and one integer, at
5331 least as wide as FROM and TO, respectively, in which we can open-code
5332 this conversion. If the integer mode is wider than the mode of TO,
5333 we can do the conversion either signed or unsigned. */
5335 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5336 fmode
= GET_MODE_WIDER_MODE (fmode
))
5337 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5338 imode
= GET_MODE_WIDER_MODE (imode
))
5340 icode
= convert_optab_handler (tab
, imode
, fmode
);
5341 if (icode
!= CODE_FOR_nothing
)
5343 rtx last
= get_last_insn ();
5344 if (fmode
!= GET_MODE (from
))
5345 from
= convert_to_mode (fmode
, from
, 0);
5347 if (imode
!= GET_MODE (to
))
5348 target
= gen_reg_rtx (imode
);
5350 if (!maybe_emit_unop_insn (icode
, target
, from
, UNKNOWN
))
5352 delete_insns_since (last
);
5356 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (optab_handler (code_to_optab[(int) code], mode)
	      != CODE_FOR_nothing));
}
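/* Illustrative example (added; not part of the original source): test
   whether the target can negate an SImode value with a single insn
   before deciding to open-code the operation.  */
#if 0
  if (have_insn_for (NEG, SImode))
    {
      /* A negsi2-style pattern exists.  */
    }
#endif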
5374 /* Set all insn_code fields to CODE_FOR_nothing. */
5377 init_insn_codes (void)
5379 memset (optab_table
, 0, sizeof (optab_table
));
5380 memset (convert_optab_table
, 0, sizeof (convert_optab_table
));
5381 memset (direct_optab_table
, 0, sizeof (direct_optab_table
));
5384 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5386 init_optab (optab op
, enum rtx_code code
)
5389 code_to_optab
[(int) code
] = op
;
5392 /* Same, but fill in its code as CODE, and do _not_ write it into
5393 the code_to_optab table. */
5395 init_optabv (optab op
, enum rtx_code code
)
5400 /* Conversion optabs never go in the code_to_optab table. */
5402 init_convert_optab (convert_optab op
, enum rtx_code code
)
5407 /* Initialize the libfunc fields of an entire group of entries in some
5408 optab. Each entry is set equal to a string consisting of a leading
5409 pair of underscores followed by a generic operation name followed by
5410 a mode name (downshifted to lowercase) followed by a single character
5411 representing the number of operands for the given operation (which is
5412 usually one of the characters '2', '3', or '4').
5414 OPTABLE is the table in which libfunc fields are to be initialized.
5415 OPNAME is the generic (string) name of the operation.
5416 SUFFIX is the character which specifies the number of operands for
5417 the given generic operation.
5418 MODE is the mode to generate for.
5422 gen_libfunc (optab optable
, const char *opname
, int suffix
, enum machine_mode mode
)
5424 unsigned opname_len
= strlen (opname
);
5425 const char *mname
= GET_MODE_NAME (mode
);
5426 unsigned mname_len
= strlen (mname
);
5427 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5428 int len
= prefix_len
+ opname_len
+ mname_len
+ 1 + 1;
5429 char *libfunc_name
= XALLOCAVEC (char, len
);
5436 if (targetm
.libfunc_gnu_prefix
)
5443 for (q
= opname
; *q
; )
5445 for (q
= mname
; *q
; q
++)
5446 *p
++ = TOLOWER (*q
);
5450 set_optab_libfunc (optable
, mode
,
5451 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
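/* Example of the names this produces (added; not part of the original
   source): gen_libfunc (add_optab, "add", '3', DFmode) registers
   "__adddf3", or "__gnu_adddf3" when targetm.libfunc_gnu_prefix is set.  */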
5454 /* Like gen_libfunc, but verify that integer operation is involved. */
5457 gen_int_libfunc (optab optable
, const char *opname
, char suffix
,
5458 enum machine_mode mode
)
5460 int maxsize
= 2 * BITS_PER_WORD
;
5462 if (GET_MODE_CLASS (mode
) != MODE_INT
)
5464 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5465 maxsize
= LONG_LONG_TYPE_SIZE
;
5466 if (GET_MODE_CLASS (mode
) != MODE_INT
5467 || mode
< word_mode
|| GET_MODE_BITSIZE (mode
) > maxsize
)
5469 gen_libfunc (optable
, opname
, suffix
, mode
);
5472 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5475 gen_fp_libfunc (optab optable
, const char *opname
, char suffix
,
5476 enum machine_mode mode
)
5480 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5481 gen_libfunc (optable
, opname
, suffix
, mode
);
5482 if (DECIMAL_FLOAT_MODE_P (mode
))
5484 dec_opname
= XALLOCAVEC (char, sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5485 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5486 depending on the low level floating format used. */
5487 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5488 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5489 gen_libfunc (optable
, dec_opname
, suffix
, mode
);
5493 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5496 gen_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5497 enum machine_mode mode
)
5499 if (!ALL_FIXED_POINT_MODE_P (mode
))
5501 gen_libfunc (optable
, opname
, suffix
, mode
);
/* Like gen_libfunc, but verify that signed fixed-point operation is
   involved.  */
5508 gen_signed_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5509 enum machine_mode mode
)
5511 if (!SIGNED_FIXED_POINT_MODE_P (mode
))
5513 gen_libfunc (optable
, opname
, suffix
, mode
);
/* Like gen_libfunc, but verify that unsigned fixed-point operation is
   involved.  */
5520 gen_unsigned_fixed_libfunc (optab optable
, const char *opname
, char suffix
,
5521 enum machine_mode mode
)
5523 if (!UNSIGNED_FIXED_POINT_MODE_P (mode
))
5525 gen_libfunc (optable
, opname
, suffix
, mode
);
5528 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5531 gen_int_fp_libfunc (optab optable
, const char *name
, char suffix
,
5532 enum machine_mode mode
)
5534 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5535 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5536 if (INTEGRAL_MODE_P (mode
))
5537 gen_int_libfunc (optable
, name
, suffix
, mode
);
5540 /* Like gen_libfunc, but verify that FP or INT operation is involved
5541 and add 'v' suffix for integer operation. */
5544 gen_intv_fp_libfunc (optab optable
, const char *name
, char suffix
,
5545 enum machine_mode mode
)
5547 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5548 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5549 if (GET_MODE_CLASS (mode
) == MODE_INT
)
5551 int len
= strlen (name
);
5552 char *v_name
= XALLOCAVEC (char, len
+ 2);
5553 strcpy (v_name
, name
);
5555 v_name
[len
+ 1] = 0;
5556 gen_int_libfunc (optable
, v_name
, suffix
, mode
);
/* Like gen_libfunc, but verify that FP or INT or FIXED operation is
   involved.  */
5564 gen_int_fp_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5565 enum machine_mode mode
)
5567 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5568 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5569 if (INTEGRAL_MODE_P (mode
))
5570 gen_int_libfunc (optable
, name
, suffix
, mode
);
5571 if (ALL_FIXED_POINT_MODE_P (mode
))
5572 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
/* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
   involved.  */
5579 gen_int_fp_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5580 enum machine_mode mode
)
5582 if (DECIMAL_FLOAT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5583 gen_fp_libfunc (optable
, name
, suffix
, mode
);
5584 if (INTEGRAL_MODE_P (mode
))
5585 gen_int_libfunc (optable
, name
, suffix
, mode
);
5586 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5587 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
/* Like gen_libfunc, but verify that INT or FIXED operation is
   involved.  */
5594 gen_int_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5595 enum machine_mode mode
)
5597 if (INTEGRAL_MODE_P (mode
))
5598 gen_int_libfunc (optable
, name
, suffix
, mode
);
5599 if (ALL_FIXED_POINT_MODE_P (mode
))
5600 gen_fixed_libfunc (optable
, name
, suffix
, mode
);
/* Like gen_libfunc, but verify that INT or signed FIXED operation is
   involved.  */
5607 gen_int_signed_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5608 enum machine_mode mode
)
5610 if (INTEGRAL_MODE_P (mode
))
5611 gen_int_libfunc (optable
, name
, suffix
, mode
);
5612 if (SIGNED_FIXED_POINT_MODE_P (mode
))
5613 gen_signed_fixed_libfunc (optable
, name
, suffix
, mode
);
/* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
   involved.  */
5620 gen_int_unsigned_fixed_libfunc (optab optable
, const char *name
, char suffix
,
5621 enum machine_mode mode
)
5623 if (INTEGRAL_MODE_P (mode
))
5624 gen_int_libfunc (optable
, name
, suffix
, mode
);
5625 if (UNSIGNED_FIXED_POINT_MODE_P (mode
))
5626 gen_unsigned_fixed_libfunc (optable
, name
, suffix
, mode
);
5629 /* Initialize the libfunc fields of an entire group of entries of an
5630 inter-mode-class conversion optab. The string formation rules are
5631 similar to the ones for init_libfuncs, above, but instead of having
5632 a mode name and an operand count these functions have two mode names
5633 and no operand count. */
5636 gen_interclass_conv_libfunc (convert_optab tab
,
5638 enum machine_mode tmode
,
5639 enum machine_mode fmode
)
5641 size_t opname_len
= strlen (opname
);
5642 size_t mname_len
= 0;
5644 const char *fname
, *tname
;
5646 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5647 char *libfunc_name
, *suffix
;
5648 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5651 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5652 depends on which underlying decimal floating point format is used. */
5653 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5655 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5657 nondec_name
= XALLOCAVEC (char, prefix_len
+ opname_len
+ mname_len
+ 1 + 1);
5658 nondec_name
[0] = '_';
5659 nondec_name
[1] = '_';
5660 if (targetm
.libfunc_gnu_prefix
)
5662 nondec_name
[2] = 'g';
5663 nondec_name
[3] = 'n';
5664 nondec_name
[4] = 'u';
5665 nondec_name
[5] = '_';
5668 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5669 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5671 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5674 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5675 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5676 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5678 fname
= GET_MODE_NAME (fmode
);
5679 tname
= GET_MODE_NAME (tmode
);
5681 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5683 libfunc_name
= dec_name
;
5684 suffix
= dec_suffix
;
5688 libfunc_name
= nondec_name
;
5689 suffix
= nondec_suffix
;
5693 for (q
= fname
; *q
; p
++, q
++)
5695 for (q
= tname
; *q
; p
++, q
++)
5700 set_conv_libfunc (tab
, tmode
, fmode
,
5701 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
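/* Example of the names this produces (added; not part of the original
   source): for sfloat_optab from SImode to DFmode the result is
   "__floatsidf"; when a decimal float mode is involved the DECIMAL_PREFIX
   is used instead, e.g. "__bid_floatsisd" or "__dpd_floatsisd" for SImode
   to SDmode.  */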
5704 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5705 int->fp conversion. */
5708 gen_int_to_fp_conv_libfunc (convert_optab tab
,
5710 enum machine_mode tmode
,
5711 enum machine_mode fmode
)
5713 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5715 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5717 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* ufloat_optab is special by using floatun for FP and floatuns for decimal
   FP names.  */
5724 gen_ufloat_conv_libfunc (convert_optab tab
,
5725 const char *opname ATTRIBUTE_UNUSED
,
5726 enum machine_mode tmode
,
5727 enum machine_mode fmode
)
5729 if (DECIMAL_FLOAT_MODE_P (tmode
))
5730 gen_int_to_fp_conv_libfunc (tab
, "floatuns", tmode
, fmode
);
5732 gen_int_to_fp_conv_libfunc (tab
, "floatun", tmode
, fmode
);
/* Same as gen_interclass_conv_libfunc but verify that we are producing
   int->fp conversion with no decimal floating point involved.  */
5739 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab
,
5741 enum machine_mode tmode
,
5742 enum machine_mode fmode
)
5744 if (GET_MODE_CLASS (fmode
) != MODE_INT
)
5746 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
)
5748 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5751 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5752 fp->int conversion with no decimal floating point involved. */
5755 gen_fp_to_int_conv_libfunc (convert_optab tab
,
5757 enum machine_mode tmode
,
5758 enum machine_mode fmode
)
5760 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5762 if (GET_MODE_CLASS (tmode
) != MODE_INT
)
5764 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are similar to the ones for init_libfuncs,
   above.  */
5772 gen_intraclass_conv_libfunc (convert_optab tab
, const char *opname
,
5773 enum machine_mode tmode
, enum machine_mode fmode
)
5775 size_t opname_len
= strlen (opname
);
5776 size_t mname_len
= 0;
5778 const char *fname
, *tname
;
5780 int prefix_len
= targetm
.libfunc_gnu_prefix
? 6 : 2;
5781 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5782 char *libfunc_name
, *suffix
;
5785 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5786 depends on which underlying decimal floating point format is used. */
5787 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5789 mname_len
= strlen (GET_MODE_NAME (tmode
)) + strlen (GET_MODE_NAME (fmode
));
5791 nondec_name
= XALLOCAVEC (char, 2 + opname_len
+ mname_len
+ 1 + 1);
5792 nondec_name
[0] = '_';
5793 nondec_name
[1] = '_';
5794 if (targetm
.libfunc_gnu_prefix
)
5796 nondec_name
[2] = 'g';
5797 nondec_name
[3] = 'n';
5798 nondec_name
[4] = 'u';
5799 nondec_name
[5] = '_';
5801 memcpy (&nondec_name
[prefix_len
], opname
, opname_len
);
5802 nondec_suffix
= nondec_name
+ opname_len
+ prefix_len
;
5804 dec_name
= XALLOCAVEC (char, 2 + dec_len
+ opname_len
+ mname_len
+ 1 + 1);
5807 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5808 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5809 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5811 fname
= GET_MODE_NAME (fmode
);
5812 tname
= GET_MODE_NAME (tmode
);
5814 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5816 libfunc_name
= dec_name
;
5817 suffix
= dec_suffix
;
5821 libfunc_name
= nondec_name
;
5822 suffix
= nondec_suffix
;
5826 for (q
= fname
; *q
; p
++, q
++)
5828 for (q
= tname
; *q
; p
++, q
++)
5834 set_conv_libfunc (tab
, tmode
, fmode
,
5835 ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
/* Pick the proper libcall for trunc_optab.  We need to choose whether we do
   truncation or extension and whether it is interclass or intraclass.  */
5842 gen_trunc_conv_libfunc (convert_optab tab
,
5844 enum machine_mode tmode
,
5845 enum machine_mode fmode
)
5847 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5849 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5854 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5855 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5856 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5858 if (GET_MODE_PRECISION (fmode
) <= GET_MODE_PRECISION (tmode
))
5861 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5862 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5863 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5864 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for extend_optab.  We need to choose whether we do
   truncation or extension and whether it is interclass or intraclass.  */
5871 gen_extend_conv_libfunc (convert_optab tab
,
5872 const char *opname ATTRIBUTE_UNUSED
,
5873 enum machine_mode tmode
,
5874 enum machine_mode fmode
)
5876 if (GET_MODE_CLASS (tmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (tmode
))
5878 if (GET_MODE_CLASS (fmode
) != MODE_FLOAT
&& !DECIMAL_FLOAT_MODE_P (fmode
))
5883 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (fmode
))
5884 || (GET_MODE_CLASS (fmode
) == MODE_FLOAT
&& DECIMAL_FLOAT_MODE_P (tmode
)))
5885 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5887 if (GET_MODE_PRECISION (fmode
) > GET_MODE_PRECISION (tmode
))
5890 if ((GET_MODE_CLASS (tmode
) == MODE_FLOAT
5891 && GET_MODE_CLASS (fmode
) == MODE_FLOAT
)
5892 || (DECIMAL_FLOAT_MODE_P (fmode
) && DECIMAL_FLOAT_MODE_P (tmode
)))
5893 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for fract_optab.  We need to choose whether the
   conversion is interclass or intraclass.  */
5900 gen_fract_conv_libfunc (convert_optab tab
,
5902 enum machine_mode tmode
,
5903 enum machine_mode fmode
)
5907 if (!(ALL_FIXED_POINT_MODE_P (tmode
) || ALL_FIXED_POINT_MODE_P (fmode
)))
5910 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5911 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5913 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5916 /* Pick proper libcall for fractuns_optab. */
5919 gen_fractuns_conv_libfunc (convert_optab tab
,
5921 enum machine_mode tmode
,
5922 enum machine_mode fmode
)
  /* One mode must be a fixed-point mode, and the other must be an integer
     mode.  */
5928 if (!((ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
)
5929 || (ALL_FIXED_POINT_MODE_P (fmode
)
5930 && GET_MODE_CLASS (tmode
) == MODE_INT
)))
5933 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
/* Pick the proper libcall for satfract_optab.  We need to choose whether the
   conversion is interclass or intraclass.  */
5940 gen_satfract_conv_libfunc (convert_optab tab
,
5942 enum machine_mode tmode
,
5943 enum machine_mode fmode
)
5947 /* TMODE must be a fixed-point mode. */
5948 if (!ALL_FIXED_POINT_MODE_P (tmode
))
5951 if (GET_MODE_CLASS (tmode
) == GET_MODE_CLASS (fmode
))
5952 gen_intraclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5954 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5957 /* Pick proper libcall for satfractuns_optab. */
5960 gen_satfractuns_conv_libfunc (convert_optab tab
,
5962 enum machine_mode tmode
,
5963 enum machine_mode fmode
)
5967 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5968 if (!(ALL_FIXED_POINT_MODE_P (tmode
) && GET_MODE_CLASS (fmode
) == MODE_INT
))
5971 gen_interclass_conv_libfunc (tab
, opname
, tmode
, fmode
);
5974 /* A table of previously-created libfuncs, hashed by name. */
5975 static GTY ((param_is (union tree_node
))) htab_t libfunc_decls
;
/* Hashtable callbacks for libfunc_decls.  */

static hashval_t
libfunc_decl_hash (const void *entry)
{
  return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
}

static int
libfunc_decl_eq (const void *entry1, const void *entry2)
{
  return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
}
5991 /* Build a decl for a libfunc named NAME. */
5994 build_libfunc_function (const char *name
)
5996 tree decl
= build_decl (UNKNOWN_LOCATION
, FUNCTION_DECL
,
5997 get_identifier (name
),
5998 build_function_type (integer_type_node
, NULL_TREE
));
  /* ??? We don't have any type information except that this is
     a function.  Pretend this is "int foo()".  */
6001 DECL_ARTIFICIAL (decl
) = 1;
6002 DECL_EXTERNAL (decl
) = 1;
6003 TREE_PUBLIC (decl
) = 1;
6004 gcc_assert (DECL_ASSEMBLER_NAME (decl
));
6006 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6007 are the flags assigned by targetm.encode_section_info. */
6008 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl
), 0), NULL
);
6014 init_one_libfunc (const char *name
)
6020 if (libfunc_decls
== NULL
)
6021 libfunc_decls
= htab_create_ggc (37, libfunc_decl_hash
,
6022 libfunc_decl_eq
, NULL
);
6024 /* See if we have already created a libfunc decl for this function. */
6025 id
= get_identifier (name
);
6026 hash
= IDENTIFIER_HASH_VALUE (id
);
6027 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, INSERT
);
6028 decl
= (tree
) *slot
;
6031 /* Create a new decl, so that it can be passed to
6032 targetm.encode_section_info. */
6033 decl
= build_libfunc_function (name
);
6036 return XEXP (DECL_RTL (decl
), 0);
6039 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6042 set_user_assembler_libfunc (const char *name
, const char *asmspec
)
6048 id
= get_identifier (name
);
6049 hash
= IDENTIFIER_HASH_VALUE (id
);
6050 slot
= htab_find_slot_with_hash (libfunc_decls
, id
, hash
, NO_INSERT
);
6052 decl
= (tree
) *slot
;
6053 set_user_assembler_name (decl
, asmspec
);
6054 return XEXP (DECL_RTL (decl
), 0);
6057 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6058 MODE to NAME, which should be either 0 or a string constant. */
6060 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
6063 struct libfunc_entry e
;
6064 struct libfunc_entry
**slot
;
6065 e
.optab
= (size_t) (optable
- &optab_table
[0]);
6070 val
= init_one_libfunc (name
);
6073 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
6075 *slot
= ggc_alloc_libfunc_entry ();
6076 (*slot
)->optab
= (size_t) (optable
- &optab_table
[0]);
6077 (*slot
)->mode1
= mode
;
6078 (*slot
)->mode2
= VOIDmode
;
6079 (*slot
)->libfunc
= val
;
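/* Illustrative example (added; not part of the original source): a target's
   TARGET_INIT_LIBFUNCS hook can redirect an operation to its own runtime
   routine.  The routine name below is hypothetical.  */
#if 0
  set_optab_libfunc (smod_optab, SImode, "__custom_modsi3");
#endif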
6082 /* Call this to reset the function entry for one conversion optab
6083 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6084 either 0 or a string constant. */
6086 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
6087 enum machine_mode fmode
, const char *name
)
6090 struct libfunc_entry e
;
6091 struct libfunc_entry
**slot
;
6092 e
.optab
= (size_t) (optable
- &convert_optab_table
[0]);
6097 val
= init_one_libfunc (name
);
6100 slot
= (struct libfunc_entry
**) htab_find_slot (libfunc_hash
, &e
, INSERT
);
6102 *slot
= ggc_alloc_libfunc_entry ();
6103 (*slot
)->optab
= (size_t) (optable
- &convert_optab_table
[0]);
6104 (*slot
)->mode1
= tmode
;
6105 (*slot
)->mode2
= fmode
;
6106 (*slot
)->libfunc
= val
;
6109 /* Call this to initialize the contents of the optabs
6110 appropriately for the current target machine. */
6117 htab_empty (libfunc_hash
);
6118 /* We statically initialize the insn_codes with the equivalent of
6119 CODE_FOR_nothing. Repeat the process if reinitialising. */
6123 libfunc_hash
= htab_create_ggc (10, hash_libfunc
, eq_libfunc
, NULL
);
6125 init_optab (add_optab
, PLUS
);
6126 init_optabv (addv_optab
, PLUS
);
6127 init_optab (sub_optab
, MINUS
);
6128 init_optabv (subv_optab
, MINUS
);
6129 init_optab (ssadd_optab
, SS_PLUS
);
6130 init_optab (usadd_optab
, US_PLUS
);
6131 init_optab (sssub_optab
, SS_MINUS
);
6132 init_optab (ussub_optab
, US_MINUS
);
6133 init_optab (smul_optab
, MULT
);
6134 init_optab (ssmul_optab
, SS_MULT
);
6135 init_optab (usmul_optab
, US_MULT
);
6136 init_optabv (smulv_optab
, MULT
);
6137 init_optab (smul_highpart_optab
, UNKNOWN
);
6138 init_optab (umul_highpart_optab
, UNKNOWN
);
6139 init_optab (smul_widen_optab
, UNKNOWN
);
6140 init_optab (umul_widen_optab
, UNKNOWN
);
6141 init_optab (usmul_widen_optab
, UNKNOWN
);
6142 init_optab (smadd_widen_optab
, UNKNOWN
);
6143 init_optab (umadd_widen_optab
, UNKNOWN
);
6144 init_optab (ssmadd_widen_optab
, UNKNOWN
);
6145 init_optab (usmadd_widen_optab
, UNKNOWN
);
6146 init_optab (smsub_widen_optab
, UNKNOWN
);
6147 init_optab (umsub_widen_optab
, UNKNOWN
);
6148 init_optab (ssmsub_widen_optab
, UNKNOWN
);
6149 init_optab (usmsub_widen_optab
, UNKNOWN
);
6150 init_optab (sdiv_optab
, DIV
);
6151 init_optab (ssdiv_optab
, SS_DIV
);
6152 init_optab (usdiv_optab
, US_DIV
);
6153 init_optabv (sdivv_optab
, DIV
);
6154 init_optab (sdivmod_optab
, UNKNOWN
);
6155 init_optab (udiv_optab
, UDIV
);
6156 init_optab (udivmod_optab
, UNKNOWN
);
6157 init_optab (smod_optab
, MOD
);
6158 init_optab (umod_optab
, UMOD
);
6159 init_optab (fmod_optab
, UNKNOWN
);
6160 init_optab (remainder_optab
, UNKNOWN
);
6161 init_optab (ftrunc_optab
, UNKNOWN
);
6162 init_optab (and_optab
, AND
);
6163 init_optab (ior_optab
, IOR
);
6164 init_optab (xor_optab
, XOR
);
6165 init_optab (ashl_optab
, ASHIFT
);
6166 init_optab (ssashl_optab
, SS_ASHIFT
);
6167 init_optab (usashl_optab
, US_ASHIFT
);
6168 init_optab (ashr_optab
, ASHIFTRT
);
6169 init_optab (lshr_optab
, LSHIFTRT
);
6170 init_optabv (vashl_optab
, ASHIFT
);
6171 init_optabv (vashr_optab
, ASHIFTRT
);
6172 init_optabv (vlshr_optab
, LSHIFTRT
);
6173 init_optab (rotl_optab
, ROTATE
);
6174 init_optab (rotr_optab
, ROTATERT
);
6175 init_optab (smin_optab
, SMIN
);
6176 init_optab (smax_optab
, SMAX
);
6177 init_optab (umin_optab
, UMIN
);
6178 init_optab (umax_optab
, UMAX
);
6179 init_optab (pow_optab
, UNKNOWN
);
6180 init_optab (atan2_optab
, UNKNOWN
);
6181 init_optab (fma_optab
, FMA
);
6182 init_optab (fms_optab
, UNKNOWN
);
6183 init_optab (fnma_optab
, UNKNOWN
);
6184 init_optab (fnms_optab
, UNKNOWN
);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
6188 init_optab (mov_optab
, SET
);
6189 init_optab (movstrict_optab
, STRICT_LOW_PART
);
6190 init_optab (cbranch_optab
, COMPARE
);

  init_optab (cmov_optab, UNKNOWN);
  init_optab (cstore_optab, UNKNOWN);
  init_optab (ctrap_optab, UNKNOWN);

  init_optab (storent_optab, UNKNOWN);

  init_optab (cmp_optab, UNKNOWN);
  init_optab (ucmp_optab, UNKNOWN);

  init_optab (eq_optab, EQ);
  init_optab (ne_optab, NE);
  init_optab (gt_optab, GT);
  init_optab (ge_optab, GE);
  init_optab (lt_optab, LT);
  init_optab (le_optab, LE);
  init_optab (unord_optab, UNORDERED);

  init_optab (neg_optab, NEG);
  init_optab (ssneg_optab, SS_NEG);
  init_optab (usneg_optab, US_NEG);
  init_optabv (negv_optab, NEG);
  init_optab (abs_optab, ABS);
  init_optabv (absv_optab, ABS);
  init_optab (addcc_optab, UNKNOWN);
  init_optab (one_cmpl_optab, NOT);
  init_optab (bswap_optab, BSWAP);
  init_optab (ffs_optab, FFS);
  init_optab (clz_optab, CLZ);
  init_optab (ctz_optab, CTZ);
  init_optab (clrsb_optab, CLRSB);
  init_optab (popcount_optab, POPCOUNT);
  init_optab (parity_optab, PARITY);
  init_optab (sqrt_optab, SQRT);
  init_optab (floor_optab, UNKNOWN);
  init_optab (ceil_optab, UNKNOWN);
  init_optab (round_optab, UNKNOWN);
  init_optab (btrunc_optab, UNKNOWN);
  init_optab (nearbyint_optab, UNKNOWN);
  init_optab (rint_optab, UNKNOWN);
  init_optab (sincos_optab, UNKNOWN);
  init_optab (sin_optab, UNKNOWN);
  init_optab (asin_optab, UNKNOWN);
  init_optab (cos_optab, UNKNOWN);
  init_optab (acos_optab, UNKNOWN);
  init_optab (exp_optab, UNKNOWN);
  init_optab (exp10_optab, UNKNOWN);
  init_optab (exp2_optab, UNKNOWN);
  init_optab (expm1_optab, UNKNOWN);
  init_optab (ldexp_optab, UNKNOWN);
  init_optab (scalb_optab, UNKNOWN);
  init_optab (significand_optab, UNKNOWN);
  init_optab (logb_optab, UNKNOWN);
  init_optab (ilogb_optab, UNKNOWN);
  init_optab (log_optab, UNKNOWN);
  init_optab (log10_optab, UNKNOWN);
  init_optab (log2_optab, UNKNOWN);
  init_optab (log1p_optab, UNKNOWN);
  init_optab (tan_optab, UNKNOWN);
  init_optab (atan_optab, UNKNOWN);
  init_optab (copysign_optab, UNKNOWN);
  init_optab (signbit_optab, UNKNOWN);

  init_optab (isinf_optab, UNKNOWN);

  init_optab (strlen_optab, UNKNOWN);
  init_optab (push_optab, UNKNOWN);

  init_optab (reduc_smax_optab, UNKNOWN);
  init_optab (reduc_umax_optab, UNKNOWN);
  init_optab (reduc_smin_optab, UNKNOWN);
  init_optab (reduc_umin_optab, UNKNOWN);
  init_optab (reduc_splus_optab, UNKNOWN);
  init_optab (reduc_uplus_optab, UNKNOWN);

  init_optab (ssum_widen_optab, UNKNOWN);
  init_optab (usum_widen_optab, UNKNOWN);
  init_optab (sdot_prod_optab, UNKNOWN);
  init_optab (udot_prod_optab, UNKNOWN);

  init_optab (vec_extract_optab, UNKNOWN);
  init_optab (vec_extract_even_optab, UNKNOWN);
  init_optab (vec_extract_odd_optab, UNKNOWN);
  init_optab (vec_interleave_high_optab, UNKNOWN);
  init_optab (vec_interleave_low_optab, UNKNOWN);
  init_optab (vec_set_optab, UNKNOWN);
  init_optab (vec_init_optab, UNKNOWN);
  init_optab (vec_shl_optab, UNKNOWN);
  init_optab (vec_shr_optab, UNKNOWN);
  init_optab (vec_realign_load_optab, UNKNOWN);
  init_optab (movmisalign_optab, UNKNOWN);
  init_optab (vec_widen_umult_hi_optab, UNKNOWN);
  init_optab (vec_widen_umult_lo_optab, UNKNOWN);
  init_optab (vec_widen_smult_hi_optab, UNKNOWN);
  init_optab (vec_widen_smult_lo_optab, UNKNOWN);
  init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
  init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
  init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
  init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_lo_optab, UNKNOWN);
  init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
  init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
  init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
  init_optab (vec_pack_trunc_optab, UNKNOWN);
  init_optab (vec_pack_usat_optab, UNKNOWN);
  init_optab (vec_pack_ssat_optab, UNKNOWN);
  init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
  init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);

  init_optab (powi_optab, UNKNOWN);

  init_convert_optab (sext_optab, SIGN_EXTEND);
  init_convert_optab (zext_optab, ZERO_EXTEND);
  init_convert_optab (trunc_optab, TRUNCATE);
  init_convert_optab (sfix_optab, FIX);
  init_convert_optab (ufix_optab, UNSIGNED_FIX);
  init_convert_optab (sfixtrunc_optab, UNKNOWN);
  init_convert_optab (ufixtrunc_optab, UNKNOWN);
  init_convert_optab (sfloat_optab, FLOAT);
  init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
  init_convert_optab (lrint_optab, UNKNOWN);
  init_convert_optab (lround_optab, UNKNOWN);
  init_convert_optab (lfloor_optab, UNKNOWN);
  init_convert_optab (lceil_optab, UNKNOWN);

  init_convert_optab (fract_optab, FRACT_CONVERT);
  init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
  init_convert_optab (satfract_optab, SAT_FRACT);
  init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
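
  /* An UNKNOWN code above means no rtx code maps to the optab through
     code_to_optab; such optabs are only used when a caller refers to them
     by name, e.g. optab_handler (smul_highpart_optab, mode).  */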

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  add_optab->libcall_basename = "add";
  add_optab->libcall_suffix = '3';
  add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  addv_optab->libcall_basename = "add";
  addv_optab->libcall_suffix = '3';
  addv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssadd_optab->libcall_basename = "ssadd";
  ssadd_optab->libcall_suffix = '3';
  ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
  usadd_optab->libcall_basename = "usadd";
  usadd_optab->libcall_suffix = '3';
  usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sub_optab->libcall_basename = "sub";
  sub_optab->libcall_suffix = '3';
  sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  subv_optab->libcall_basename = "sub";
  subv_optab->libcall_suffix = '3';
  subv_optab->libcall_gen = gen_intv_fp_libfunc;
  sssub_optab->libcall_basename = "sssub";
  sssub_optab->libcall_suffix = '3';
  sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
  ussub_optab->libcall_basename = "ussub";
  ussub_optab->libcall_suffix = '3';
  ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  smul_optab->libcall_basename = "mul";
  smul_optab->libcall_suffix = '3';
  smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  smulv_optab->libcall_basename = "mul";
  smulv_optab->libcall_suffix = '3';
  smulv_optab->libcall_gen = gen_intv_fp_libfunc;
  ssmul_optab->libcall_basename = "ssmul";
  ssmul_optab->libcall_suffix = '3';
  ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
  usmul_optab->libcall_basename = "usmul";
  usmul_optab->libcall_suffix = '3';
  usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdiv_optab->libcall_basename = "div";
  sdiv_optab->libcall_suffix = '3';
  sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
  sdivv_optab->libcall_basename = "divv";
  sdivv_optab->libcall_suffix = '3';
  sdivv_optab->libcall_gen = gen_int_libfunc;
  ssdiv_optab->libcall_basename = "ssdiv";
  ssdiv_optab->libcall_suffix = '3';
  ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
  udiv_optab->libcall_basename = "udiv";
  udiv_optab->libcall_suffix = '3';
  udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  usdiv_optab->libcall_basename = "usdiv";
  usdiv_optab->libcall_suffix = '3';
  usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  sdivmod_optab->libcall_basename = "divmod";
  sdivmod_optab->libcall_suffix = '4';
  sdivmod_optab->libcall_gen = gen_int_libfunc;
  udivmod_optab->libcall_basename = "udivmod";
  udivmod_optab->libcall_suffix = '4';
  udivmod_optab->libcall_gen = gen_int_libfunc;
  smod_optab->libcall_basename = "mod";
  smod_optab->libcall_suffix = '3';
  smod_optab->libcall_gen = gen_int_libfunc;
  umod_optab->libcall_basename = "umod";
  umod_optab->libcall_suffix = '3';
  umod_optab->libcall_gen = gen_int_libfunc;
  ftrunc_optab->libcall_basename = "ftrunc";
  ftrunc_optab->libcall_suffix = '2';
  ftrunc_optab->libcall_gen = gen_fp_libfunc;
  and_optab->libcall_basename = "and";
  and_optab->libcall_suffix = '3';
  and_optab->libcall_gen = gen_int_libfunc;
  ior_optab->libcall_basename = "ior";
  ior_optab->libcall_suffix = '3';
  ior_optab->libcall_gen = gen_int_libfunc;
  xor_optab->libcall_basename = "xor";
  xor_optab->libcall_suffix = '3';
  xor_optab->libcall_gen = gen_int_libfunc;
  ashl_optab->libcall_basename = "ashl";
  ashl_optab->libcall_suffix = '3';
  ashl_optab->libcall_gen = gen_int_fixed_libfunc;
  ssashl_optab->libcall_basename = "ssashl";
  ssashl_optab->libcall_suffix = '3';
  ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
  usashl_optab->libcall_basename = "usashl";
  usashl_optab->libcall_suffix = '3';
  usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  ashr_optab->libcall_basename = "ashr";
  ashr_optab->libcall_suffix = '3';
  ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
  lshr_optab->libcall_basename = "lshr";
  lshr_optab->libcall_suffix = '3';
  lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
  smin_optab->libcall_basename = "min";
  smin_optab->libcall_suffix = '3';
  smin_optab->libcall_gen = gen_int_fp_libfunc;
  smax_optab->libcall_basename = "max";
  smax_optab->libcall_suffix = '3';
  smax_optab->libcall_gen = gen_int_fp_libfunc;
  umin_optab->libcall_basename = "umin";
  umin_optab->libcall_suffix = '3';
  umin_optab->libcall_gen = gen_int_libfunc;
  umax_optab->libcall_basename = "umax";
  umax_optab->libcall_suffix = '3';
  umax_optab->libcall_gen = gen_int_libfunc;
  neg_optab->libcall_basename = "neg";
  neg_optab->libcall_suffix = '2';
  neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ssneg_optab->libcall_basename = "ssneg";
  ssneg_optab->libcall_suffix = '2';
  ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
  usneg_optab->libcall_basename = "usneg";
  usneg_optab->libcall_suffix = '2';
  usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
  negv_optab->libcall_basename = "neg";
  negv_optab->libcall_suffix = '2';
  negv_optab->libcall_gen = gen_intv_fp_libfunc;
  one_cmpl_optab->libcall_basename = "one_cmpl";
  one_cmpl_optab->libcall_suffix = '2';
  one_cmpl_optab->libcall_gen = gen_int_libfunc;
  ffs_optab->libcall_basename = "ffs";
  ffs_optab->libcall_suffix = '2';
  ffs_optab->libcall_gen = gen_int_libfunc;
  clz_optab->libcall_basename = "clz";
  clz_optab->libcall_suffix = '2';
  clz_optab->libcall_gen = gen_int_libfunc;
  ctz_optab->libcall_basename = "ctz";
  ctz_optab->libcall_suffix = '2';
  ctz_optab->libcall_gen = gen_int_libfunc;
  clrsb_optab->libcall_basename = "clrsb";
  clrsb_optab->libcall_suffix = '2';
  clrsb_optab->libcall_gen = gen_int_libfunc;
  popcount_optab->libcall_basename = "popcount";
  popcount_optab->libcall_suffix = '2';
  popcount_optab->libcall_gen = gen_int_libfunc;
  parity_optab->libcall_basename = "parity";
  parity_optab->libcall_suffix = '2';
  parity_optab->libcall_gen = gen_int_libfunc;
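
  /* The generator hooks combine the basename, the mode name and the suffix
     into the usual libgcc names; e.g. add_optab in SImode yields "__addsi3"
     and umod_optab in DImode yields "__umoddi3".  */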

  /* Comparison libcalls for integers MUST come in pairs,
     signed/unsigned.  */
  cmp_optab->libcall_basename = "cmp";
  cmp_optab->libcall_suffix = '2';
  cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
  ucmp_optab->libcall_basename = "ucmp";
  ucmp_optab->libcall_suffix = '2';
  ucmp_optab->libcall_gen = gen_int_libfunc;

  /* EQ etc are floating point only.  */
  eq_optab->libcall_basename = "eq";
  eq_optab->libcall_suffix = '2';
  eq_optab->libcall_gen = gen_fp_libfunc;
  ne_optab->libcall_basename = "ne";
  ne_optab->libcall_suffix = '2';
  ne_optab->libcall_gen = gen_fp_libfunc;
  gt_optab->libcall_basename = "gt";
  gt_optab->libcall_suffix = '2';
  gt_optab->libcall_gen = gen_fp_libfunc;
  ge_optab->libcall_basename = "ge";
  ge_optab->libcall_suffix = '2';
  ge_optab->libcall_gen = gen_fp_libfunc;
  lt_optab->libcall_basename = "lt";
  lt_optab->libcall_suffix = '2';
  lt_optab->libcall_gen = gen_fp_libfunc;
  le_optab->libcall_basename = "le";
  le_optab->libcall_suffix = '2';
  le_optab->libcall_gen = gen_fp_libfunc;
  unord_optab->libcall_basename = "unord";
  unord_optab->libcall_suffix = '2';
  unord_optab->libcall_gen = gen_fp_libfunc;
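
  /* With the default hooks these become names such as "__eqdf2" and
     "__unorddf2" for the floating-point modes.  */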

  powi_optab->libcall_basename = "powi";
  powi_optab->libcall_suffix = '2';
  powi_optab->libcall_gen = gen_fp_libfunc;

  sfloat_optab->libcall_basename = "float";
  sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
  ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
  sfix_optab->libcall_basename = "fix";
  sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  ufix_optab->libcall_basename = "fixuns";
  ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
  lrint_optab->libcall_basename = "lrint";
  lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lround_optab->libcall_basename = "lround";
  lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lfloor_optab->libcall_basename = "lfloor";
  lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
  lceil_optab->libcall_basename = "lceil";
  lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;

  /* trunc_optab is also used for FLOAT_EXTEND.  */
  sext_optab->libcall_basename = "extend";
  sext_optab->libcall_gen = gen_extend_conv_libfunc;
  trunc_optab->libcall_basename = "trunc";
  trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
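
  /* The conversion generators splice both mode names into the libfunc name,
     e.g. "__fixdfsi" (DFmode to SImode), "__floatsidf", "__extendsfdf2" and
     "__truncdfsf2".  */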

  /* Conversions for fixed-point modes and other modes.  */
  fract_optab->libcall_basename = "fract";
  fract_optab->libcall_gen = gen_fract_conv_libfunc;
  satfract_optab->libcall_basename = "satfract";
  satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
  fractuns_optab->libcall_basename = "fractuns";
  fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
  satfractuns_optab->libcall_basename = "satfractuns";
  satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;

  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
		       "ffs");

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  if (targetm.libfunc_gnu_prefix)
    {
      set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
    }
  else
    {
      set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
    }
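
  /* __bswapsi2 and __bswapdi2 reverse the byte order of a 32-bit and a
     64-bit value respectively; registering them per mode lets bswap expand
     to a libcall even when the mode is not word_mode.  */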

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif

  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}

/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	optab o;
	rtx l;

	o = &optab_table[i];
	l = optab_libfunc (o, (enum machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (o->code),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  convert_optab o;
	  rtx l;

	  o = &convert_optab_table[i];
	  l = convert_optab_libfunc (o, (enum machine_mode) j,
				     (enum machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (o->code),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
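
/* Each dumped line names the rtx code, the mode (or mode pair for the
   conversion optabs) and the associated libfunc, e.g. "plus  SI:  __addsi3"
   in the arithmetic dump.  */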

/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL_RTX;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}

/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
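
/* For instance, a LT_EXPR on an unsigned operand type maps to LTU, while the
   same tree code on a signed type maps to LT.  */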

/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  struct expand_operand ops[2];
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);

  create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
  create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}

/* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
   of the CPU.  SEL may be NULL, which stands for an unknown constant.  */

bool
can_vec_perm_p (enum machine_mode mode, bool variable,
		const unsigned char *sel)
{
  enum machine_mode qimode;

  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  if (!variable)
    {
      if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
	  && (sel == NULL
	      || targetm.vectorize.vec_perm_const_ok == NULL
	      || targetm.vectorize.vec_perm_const_ok (mode, sel)))
	return true;
    }

  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  if (GET_MODE_INNER (mode) == QImode)
    return false;
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
  if (!VECTOR_MODE_P (qimode))
    return false;

  /* ??? For completeness, we ought to check the QImode version of
     vec_perm_const_optab.  But all users of this implicit lowering
     feature implement the variable vec_perm_optab.  */
  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of variable permutations,
     we need to support shifts and adds.  */
  if (variable)
    {
      if (GET_MODE_UNIT_SIZE (mode) > 2
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
	return false;
      if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
	return false;
    }

  return true;
}
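
/* Callers can therefore query, e.g., can_vec_perm_p (V16QImode, false, sel)
   for a known constant selector, or pass VARIABLE as true to ask whether a
   run-time selector can be handled, possibly via the QImode fallback.  */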

/* Return true if we can implement VEC_INTERLEAVE_{HIGH,LOW}_EXPR or
   VEC_EXTRACT_{EVEN,ODD}_EXPR with VEC_PERM_EXPR for this target.
   If PSEL is non-null, return the selector for the permutation.  */

bool
can_vec_perm_for_code_p (enum tree_code code, enum machine_mode mode,
			 rtx *psel)
{
  bool need_sel_test = false;
  enum insn_code icode;

  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  /* Do as many tests as possible without requiring the selector.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode == CODE_FOR_nothing && GET_MODE_INNER (mode) != QImode)
    {
      enum machine_mode qimode
	= mode_for_vector (QImode, GET_MODE_SIZE (mode));
      if (VECTOR_MODE_P (qimode))
	icode = direct_optab_handler (vec_perm_optab, qimode);
    }
  if (icode == CODE_FOR_nothing)
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing
	  && targetm.vectorize.vec_perm_const_ok != NULL)
	need_sel_test = true;
    }
  if (icode == CODE_FOR_nothing)
    return false;

  /* If the selector is required, or if we need to test it, build it.  */
  if (psel || need_sel_test)
    {
      int i, nelt = GET_MODE_NUNITS (mode), alt = 0;
      unsigned char *data = XALLOCAVEC (unsigned char, nelt);

      switch (code)
	{
	case VEC_EXTRACT_ODD_EXPR:
	  alt = 1;
	  /* Fall through.  */
	case VEC_EXTRACT_EVEN_EXPR:
	  for (i = 0; i < nelt; ++i)
	    data[i] = i * 2 + alt;
	  break;

	case VEC_INTERLEAVE_HIGH_EXPR:
	  alt = nelt / 2;
	  /* Fall through.  */
	case VEC_INTERLEAVE_LOW_EXPR:
	  for (i = 0; i < nelt / 2; ++i)
	    {
	      data[i * 2] = i + alt;
	      data[i * 2 + 1] = i + nelt + alt;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (need_sel_test
	  && !targetm.vectorize.vec_perm_const_ok (mode, data))
	return false;

      if (psel)
	{
	  rtvec vec = rtvec_alloc (nelt);
	  enum machine_mode imode = mode;

	  for (i = 0; i < nelt; ++i)
	    RTVEC_ELT (vec, i) = GEN_INT (data[i]);

	  if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	    {
	      imode = int_mode_for_mode (GET_MODE_INNER (mode));
	      imode = mode_for_vector (imode, nelt);
	      gcc_assert (GET_MODE_CLASS (imode) == MODE_VECTOR_INT);
	    }

	  *psel = gen_rtx_CONST_VECTOR (imode, vec);
	}
    }

  return true;
}

/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  enum machine_mode tmode = GET_MODE (target);
  enum machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}

/* Generate instructions for vec_perm optab given its mode
   and three operands.  */

rtx
expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  enum machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  if (CONSTANT_P (sel))
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (XVECEXP (sel, 0, i));
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
				       gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      enum machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   sel, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   sel, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
			   gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
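
/* E.g. for a V4SImode permutation, each selector element 0 <= s < 8 expands
   to the four byte indices 4*s, 4*s + 1, 4*s + 2 and 4*s + 3 in the V16QImode
   permutation built above.  */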

/* Return insn code for a conditional operator with a comparison in
   mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE.  */

static inline enum insn_code
get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
{
  enum insn_code icode = CODE_FOR_nothing;
  if (uns)
    icode = convert_optab_handler (vcondu_optab, vmode, cmode);
  else
    icode = convert_optab_handler (vcond_optab, vmode, cmode);
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
{
  enum machine_mode value_mode = TYPE_MODE (value_type);
  enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
      || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
      || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
			  TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
    return false;
  return true;
}

/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (vec_cond_type);
  enum machine_mode cmp_op_mode;
  bool unsignedp;

  gcc_assert (COMPARISON_CLASS_P (op0));

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
  cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    return 0;

  comparison = vector_compare_rtx (op0, unsignedp, icode);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}

/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
			       rtx target, enum insn_code icode)
{
  struct expand_operand ops[4];
  enum machine_mode mode = GET_MODE (mem);

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], mem);
  /* OLD_VAL and NEW_VAL may have been promoted to a wider mode.
     Shrink them if so.  */
  create_convert_operand_to (&ops[2], old_val, mode, true);
  create_convert_operand_to (&ops[3], new_val, mode, true);
  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}

/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode
    = direct_optab_handler (sync_compare_and_swap_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}

/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}

/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, seq, cc_reg;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  do_pending_stack_adjust ();
  do
    {
      start_sequence ();
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      cc_reg = NULL_RTX;
      if (subtarget == NULL_RTX)
	{
	  end_sequence ();
	  return NULL_RTX;
	}

      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      seq = get_insns ();
      end_sequence ();

      /* We might be comparing against an old value.  Try again. :-( */
      if (!cc_reg && MEM_P (old_val))
	{
	  seq = NULL_RTX;
	  old_val = force_reg (mode, old_val);
	}
    }
  while (!seq);

  emit_insn (seq);
  if (cc_reg)
    return emit_store_flag_force (target, EQ, cc_reg, const0_rtx,
				  VOIDmode, 0, 1);
  else
    return emit_store_flag_force (target, EQ, subtarget, old_val,
				  VOIDmode, 1, 1);
}

/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget, cc_reg;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
					     cmp_reg, icode);
  if (subtarget == NULL_RTX)
    return false;

  cc_reg = NULL_RTX;
  if (have_insn_for (COMPARE, CCmode))
    note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
  if (cc_reg)
    {
      cmp_reg = cc_reg;
      old_reg = const0_rtx;
    }
  else if (subtarget != cmp_reg)
    emit_move_insn (cmp_reg, subtarget);

  /* ??? Mark this jump predicted not taken?  */
  emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx,
			   GET_MODE (cmp_reg), 1, label);
  return true;
}

/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = direct_optab_handler (sync_add_optab, mode);
      break;
    case IOR:
      icode = direct_optab_handler (sync_ior_optab, mode);
      break;
    case XOR:
      icode = direct_optab_handler (sync_xor_optab, mode);
      break;
    case AND:
      icode = direct_optab_handler (sync_and_optab, mode);
      break;
    case NOT:
      icode = direct_optab_handler (sync_nand_optab, mode);
      break;

    case MINUS:
      icode = direct_optab_handler (sync_sub_optab, mode);
      if (icode == CODE_FOR_nothing || CONST_INT_P (val))
	{
	  icode = direct_optab_handler (sync_add_optab, mode);
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];

      create_fixed_operand (&ops[0], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[1], val, mode, true);
      if (maybe_expand_insn (icode, 2, ops))
	return const0_rtx;
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				  true, OPTAB_LIB_WIDEN);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}
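
/* E.g. a NAND (CODE == NOT) with no sync_nand pattern is rewritten in the
   loop body above as t1 = ~(t0 & val) before the compare-and-swap retries
   the store.  */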

/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = direct_optab_handler (sync_old_add_optab, mode);
      new_code = direct_optab_handler (sync_new_add_optab, mode);
      break;
    case IOR:
      old_code = direct_optab_handler (sync_old_ior_optab, mode);
      new_code = direct_optab_handler (sync_new_ior_optab, mode);
      break;
    case XOR:
      old_code = direct_optab_handler (sync_old_xor_optab, mode);
      new_code = direct_optab_handler (sync_new_xor_optab, mode);
      break;
    case AND:
      old_code = direct_optab_handler (sync_old_and_optab, mode);
      new_code = direct_optab_handler (sync_new_and_optab, mode);
      break;
    case NOT:
      old_code = direct_optab_handler (sync_old_nand_optab, mode);
      new_code = direct_optab_handler (sync_new_nand_optab, mode);
      break;

    case MINUS:
      old_code = direct_optab_handler (sync_old_sub_optab, mode);
      new_code = direct_optab_handler (sync_new_sub_optab, mode);
      if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	  || CONST_INT_P (val))
	{
	  old_code = direct_optab_handler (sync_old_add_optab, mode);
	  new_code = direct_optab_handler (sync_new_add_optab, mode);
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does supports the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  icode = old_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  icode = new_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[2], val, mode, true);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  target = ops[0].value;
	  val = ops[2].value;
	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (!after)
		{
		  if (code == PLUS)
		    code = MINUS;
		  else if (code == MINUS)
		    code = PLUS;
		}

	      if (code == NOT)
		{
		  target = expand_simple_binop (mode, AND, target, val,
						NULL_RTX, true,
						OPTAB_LIB_WIDEN);
		  target = expand_simple_unop (mode, code, target,
					       NULL_RTX, true);
		}
	      else
		target = expand_simple_binop (mode, code, target, val,
					      NULL_RTX, true,
					      OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      start_sequence ();

      if (!after)
	emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				  true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
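
/* For example, when only the "old value" form (sync_old_add) exists but the
   caller asked for the value AFTER the operation, the code above re-applies
   the arithmetic to the returned old value to compensate.  */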

/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value stored
   in MEM is target defined.  TARGET is an optional place to stick
   the return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the test-and-set directly, great.  */
  icode = direct_optab_handler (sync_lock_test_and_set_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
      create_convert_operand_to (&ops[2], val, mode, true);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}

/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}

/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  enum machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}

/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a register.  The check for side
     effects is important because force_reg cannot handle things
     like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem
      && MEM_P (op->value)
      && !side_effects_p (XEXP (op->value, 0)))
    {
      rtx addr, mem, last;

      last = get_last_insn ();
      addr = force_reg (Pmode, XEXP (op->value, 0));
      mem = replace_equiv_address (op->value, addr);
      if (insn_operand_matches (icode, opno, mem))
	{
	  op->value = mem;
	  return true;
	}
      delete_insns_since (last);
    }

  return false;
}

/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  struct expand_operand *op)
{
  enum machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
	{
	  op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
	  mode = imode;
	}
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
	return true;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}

/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
			   unsigned int nops, struct expand_operand *ops)
{
  rtx last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
	delete_insns_since (last);
	return false;
      }
  return true;
}

/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL_RTX;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    default:
      gcc_unreachable ();
    }
}
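
/* Only generators taking up to six operands are handled here; the
   six-operand case covers, e.g., the vcond patterns expanded by
   expand_vec_cond_expr above.  */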

/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
		   struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
			struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
	     struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
		  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}

#include "gt-optabs.h"