/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
26 #include "coretypes.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
46 #include "basic-block.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
58 optab optab_table
[OTI_MAX
];
60 rtx libfunc_table
[LTI_MAX
];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table
[COI_MAX
];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab
[NUM_RTX_CODE
+ 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
88 /* Indexed by the machine mode, gives the insn code for vector conditional
91 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
92 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx
;
99 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
100 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
102 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
103 enum machine_mode
*, int *,
104 enum can_compare_purpose
);
105 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
107 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
108 static optab
new_optab (void);
109 static convert_optab
new_convert_optab (void);
110 static inline optab
init_optab (enum rtx_code
);
111 static inline optab
init_optabv (enum rtx_code
);
112 static inline convert_optab
init_convert_optab (enum rtx_code
);
113 static void init_libfuncs (optab
, int, int, const char *, int);
114 static void init_integral_libfuncs (optab
, const char *, int);
115 static void init_floating_libfuncs (optab
, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
117 enum mode_class
, enum mode_class
);
118 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
119 enum mode_class
, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
121 enum rtx_code
, int, rtx
);
122 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
123 enum machine_mode
*, int *);
124 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
125 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
126 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
127 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
129 #ifndef HAVE_conditional_trap
130 #define HAVE_conditional_trap 0
131 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
142 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
143 the result of operation CODE applied to OP0 (and OP1 if it is a binary
146 If the last insn does not set TARGET, don't do anything, but return 1.
148 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
149 don't add the REG_EQUAL note but return 0. Our caller can then try
150 again, ensuring that TARGET is not one of the operands. */
153 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
155 rtx last_insn
, insn
, set
;
158 gcc_assert (insns
&& INSN_P (insns
) && NEXT_INSN (insns
));
160 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
161 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
162 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
163 && GET_RTX_CLASS (code
) != RTX_COMPARE
164 && GET_RTX_CLASS (code
) != RTX_UNARY
)
167 if (GET_CODE (target
) == ZERO_EXTRACT
)
170 for (last_insn
= insns
;
171 NEXT_INSN (last_insn
) != NULL_RTX
;
172 last_insn
= NEXT_INSN (last_insn
))
175 set
= single_set (last_insn
);
179 if (! rtx_equal_p (SET_DEST (set
), target
)
180 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
181 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
182 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
185 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
186 besides the last insn. */
187 if (reg_overlap_mentioned_p (target
, op0
)
188 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
190 insn
= PREV_INSN (last_insn
);
191 while (insn
!= NULL_RTX
)
193 if (reg_set_p (target
, insn
))
196 insn
= PREV_INSN (insn
);
200 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
201 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
203 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
205 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
210 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
211 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
212 not actually do a sign-extend or zero-extend, but can leave the
213 higher-order bits of the result rtx undefined, for example, in the case
214 of logical operations, but not right shifts. */
217 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
218 int unsignedp
, int no_extend
)
222 /* If we don't have to extend and this is a constant, return it. */
223 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
226 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
227 extend since it will be more efficient to do so unless the signedness of
228 a promoted object differs from our extension. */
230 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
231 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
232 return convert_modes (mode
, oldmode
, op
, unsignedp
);
234 /* If MODE is no wider than a single word, we return a paradoxical
236 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
237 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
239 /* Otherwise, get an object of MODE, clobber it, and set the low-order
242 result
= gen_reg_rtx (mode
);
243 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
244 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
248 /* Return the optab used for computing the operation given by
249 the tree code, CODE. This function is not always usable (for
250 example, it cannot give complete results for multiplication
251 or division) but probably ought to be relied on more widely
252 throughout the expander. */
254 optab_for_tree_code (enum tree_code code
, tree type
)
266 return one_cmpl_optab
;
275 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
283 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
289 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
298 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
301 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
303 case REALIGN_LOAD_EXPR
:
304 return vec_realign_load_optab
;
307 return TYPE_UNSIGNED (type
) ? usum_widen_optab
: ssum_widen_optab
;
310 return TYPE_UNSIGNED (type
) ? udot_prod_optab
: sdot_prod_optab
;
313 return TYPE_UNSIGNED (type
) ? reduc_umax_optab
: reduc_smax_optab
;
316 return TYPE_UNSIGNED (type
) ? reduc_umin_optab
: reduc_smin_optab
;
318 case REDUC_PLUS_EXPR
:
319 return TYPE_UNSIGNED (type
) ? reduc_uplus_optab
: reduc_splus_optab
;
321 case VEC_LSHIFT_EXPR
:
322 return vec_shl_optab
;
324 case VEC_RSHIFT_EXPR
:
325 return vec_shr_optab
;
327 case VEC_WIDEN_MULT_HI_EXPR
:
328 return TYPE_UNSIGNED (type
) ?
329 vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
331 case VEC_WIDEN_MULT_LO_EXPR
:
332 return TYPE_UNSIGNED (type
) ?
333 vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
335 case VEC_UNPACK_HI_EXPR
:
336 return TYPE_UNSIGNED (type
) ?
337 vec_unpacku_hi_optab
: vec_unpacks_hi_optab
;
339 case VEC_UNPACK_LO_EXPR
:
340 return TYPE_UNSIGNED (type
) ?
341 vec_unpacku_lo_optab
: vec_unpacks_lo_optab
;
343 case VEC_UNPACK_FLOAT_HI_EXPR
:
344 /* The signedness is determined from input operand. */
345 return TYPE_UNSIGNED (type
) ?
346 vec_unpacku_float_hi_optab
: vec_unpacks_float_hi_optab
;
348 case VEC_UNPACK_FLOAT_LO_EXPR
:
349 /* The signedness is determined from input operand. */
350 return TYPE_UNSIGNED (type
) ?
351 vec_unpacku_float_lo_optab
: vec_unpacks_float_lo_optab
;
353 case VEC_PACK_TRUNC_EXPR
:
354 return vec_pack_trunc_optab
;
356 case VEC_PACK_SAT_EXPR
:
357 return TYPE_UNSIGNED (type
) ? vec_pack_usat_optab
: vec_pack_ssat_optab
;
359 case VEC_PACK_FIX_TRUNC_EXPR
:
360 return TYPE_UNSIGNED (type
) ?
361 vec_pack_ufix_trunc_optab
: vec_pack_sfix_trunc_optab
;
367 trapv
= INTEGRAL_TYPE_P (type
) && TYPE_OVERFLOW_TRAPS (type
);
371 return trapv
? addv_optab
: add_optab
;
374 return trapv
? subv_optab
: sub_optab
;
377 return trapv
? smulv_optab
: smul_optab
;
380 return trapv
? negv_optab
: neg_optab
;
383 return trapv
? absv_optab
: abs_optab
;
385 case VEC_EXTRACT_EVEN_EXPR
:
386 return vec_extract_even_optab
;
388 case VEC_EXTRACT_ODD_EXPR
:
389 return vec_extract_odd_optab
;
391 case VEC_INTERLEAVE_HIGH_EXPR
:
392 return vec_interleave_high_optab
;
394 case VEC_INTERLEAVE_LOW_EXPR
:
395 return vec_interleave_low_optab
;
403 /* Expand vector widening operations.
405 There are two different classes of operations handled here:
406 1) Operations whose result is wider than all the arguments to the operation.
407 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
408 In this case OP0 and optionally OP1 would be initialized,
409 but WIDE_OP wouldn't (not relevant for this case).
410 2) Operations whose result is of the same size as the last argument to the
411 operation, but wider than all the other arguments to the operation.
412 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
413 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
415 E.g, when called to expand the following operations, this is how
416 the arguments will be initialized:
418 widening-sum 2 oprnd0 - oprnd1
419 widening-dot-product 3 oprnd0 oprnd1 oprnd2
420 widening-mult 2 oprnd0 oprnd1 -
421 type-promotion (vec-unpack) 1 oprnd0 - - */
424 expand_widen_pattern_expr (tree exp
, rtx op0
, rtx op1
, rtx wide_op
, rtx target
,
427 tree oprnd0
, oprnd1
, oprnd2
;
428 enum machine_mode wmode
= 0, tmode0
, tmode1
= 0;
429 optab widen_pattern_optab
;
431 enum machine_mode xmode0
, xmode1
= 0, wxmode
= 0;
434 rtx xop0
, xop1
, wxop
;
435 int nops
= TREE_OPERAND_LENGTH (exp
);
437 oprnd0
= TREE_OPERAND (exp
, 0);
438 tmode0
= TYPE_MODE (TREE_TYPE (oprnd0
));
439 widen_pattern_optab
=
440 optab_for_tree_code (TREE_CODE (exp
), TREE_TYPE (oprnd0
));
441 icode
= (int) widen_pattern_optab
->handlers
[(int) tmode0
].insn_code
;
442 gcc_assert (icode
!= CODE_FOR_nothing
);
443 xmode0
= insn_data
[icode
].operand
[1].mode
;
447 oprnd1
= TREE_OPERAND (exp
, 1);
448 tmode1
= TYPE_MODE (TREE_TYPE (oprnd1
));
449 xmode1
= insn_data
[icode
].operand
[2].mode
;
452 /* The last operand is of a wider mode than the rest of the operands. */
460 gcc_assert (tmode1
== tmode0
);
462 oprnd2
= TREE_OPERAND (exp
, 2);
463 wmode
= TYPE_MODE (TREE_TYPE (oprnd2
));
464 wxmode
= insn_data
[icode
].operand
[3].mode
;
468 wmode
= wxmode
= insn_data
[icode
].operand
[0].mode
;
471 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, wmode
))
472 temp
= gen_reg_rtx (wmode
);
480 /* In case the insn wants input operands in modes different from
481 those of the actual operands, convert the operands. It would
482 seem that we don't need to convert CONST_INTs, but we do, so
483 that they're properly zero-extended, sign-extended or truncated
486 if (GET_MODE (op0
) != xmode0
&& xmode0
!= VOIDmode
)
487 xop0
= convert_modes (xmode0
,
488 GET_MODE (op0
) != VOIDmode
494 if (GET_MODE (op1
) != xmode1
&& xmode1
!= VOIDmode
)
495 xop1
= convert_modes (xmode1
,
496 GET_MODE (op1
) != VOIDmode
502 if (GET_MODE (wide_op
) != wxmode
&& wxmode
!= VOIDmode
)
503 wxop
= convert_modes (wxmode
,
504 GET_MODE (wide_op
) != VOIDmode
509 /* Now, if insn's predicates don't allow our operands, put them into
512 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, xmode0
)
513 && xmode0
!= VOIDmode
)
514 xop0
= copy_to_mode_reg (xmode0
, xop0
);
518 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, xmode1
)
519 && xmode1
!= VOIDmode
)
520 xop1
= copy_to_mode_reg (xmode1
, xop1
);
524 if (! (*insn_data
[icode
].operand
[3].predicate
) (wxop
, wxmode
)
525 && wxmode
!= VOIDmode
)
526 wxop
= copy_to_mode_reg (wxmode
, wxop
);
528 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, wxop
);
531 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
537 if (! (*insn_data
[icode
].operand
[2].predicate
) (wxop
, wxmode
)
538 && wxmode
!= VOIDmode
)
539 wxop
= copy_to_mode_reg (wxmode
, wxop
);
541 pat
= GEN_FCN (icode
) (temp
, xop0
, wxop
);
544 pat
= GEN_FCN (icode
) (temp
, xop0
);
551 /* Generate code to perform an operation specified by TERNARY_OPTAB
552 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
554 UNSIGNEDP is for the case where we have to widen the operands
555 to perform the operation. It says to use zero-extension.
557 If TARGET is nonzero, the value
558 is generated there, if it is convenient to do so.
559 In all cases an rtx is returned for the locus of the value;
560 this may or may not be TARGET. */
563 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
564 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
566 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
567 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
568 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
569 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
572 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
574 gcc_assert (ternary_optab
->handlers
[(int) mode
].insn_code
575 != CODE_FOR_nothing
);
577 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
578 temp
= gen_reg_rtx (mode
);
582 /* In case the insn wants input operands in modes different from
583 those of the actual operands, convert the operands. It would
584 seem that we don't need to convert CONST_INTs, but we do, so
585 that they're properly zero-extended, sign-extended or truncated
588 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
589 xop0
= convert_modes (mode0
,
590 GET_MODE (op0
) != VOIDmode
595 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
596 xop1
= convert_modes (mode1
,
597 GET_MODE (op1
) != VOIDmode
602 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
603 xop2
= convert_modes (mode2
,
604 GET_MODE (op2
) != VOIDmode
609 /* Now, if insn's predicates don't allow our operands, put them into
612 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
613 && mode0
!= VOIDmode
)
614 xop0
= copy_to_mode_reg (mode0
, xop0
);
616 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
617 && mode1
!= VOIDmode
)
618 xop1
= copy_to_mode_reg (mode1
, xop1
);
620 if (!insn_data
[icode
].operand
[3].predicate (xop2
, mode2
)
621 && mode2
!= VOIDmode
)
622 xop2
= copy_to_mode_reg (mode2
, xop2
);
624 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
631 /* Like expand_binop, but return a constant rtx if the result can be
632 calculated at compile time. The arguments and return value are
633 otherwise the same as for expand_binop. */
636 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
637 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
638 enum optab_methods methods
)
640 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
642 rtx x
= simplify_binary_operation (binoptab
->code
, mode
, op0
, op1
);
648 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
651 /* Like simplify_expand_binop, but always put the result in TARGET.
652 Return true if the expansion succeeded. */
655 force_expand_binop (enum machine_mode mode
, optab binoptab
,
656 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
657 enum optab_methods methods
)
659 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
660 target
, unsignedp
, methods
);
664 emit_move_insn (target
, x
);
668 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
671 expand_vec_shift_expr (tree vec_shift_expr
, rtx target
)
673 enum insn_code icode
;
674 rtx rtx_op1
, rtx_op2
;
675 enum machine_mode mode1
;
676 enum machine_mode mode2
;
677 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_shift_expr
));
678 tree vec_oprnd
= TREE_OPERAND (vec_shift_expr
, 0);
679 tree shift_oprnd
= TREE_OPERAND (vec_shift_expr
, 1);
683 switch (TREE_CODE (vec_shift_expr
))
685 case VEC_RSHIFT_EXPR
:
686 shift_optab
= vec_shr_optab
;
688 case VEC_LSHIFT_EXPR
:
689 shift_optab
= vec_shl_optab
;
695 icode
= (int) shift_optab
->handlers
[(int) mode
].insn_code
;
696 gcc_assert (icode
!= CODE_FOR_nothing
);
698 mode1
= insn_data
[icode
].operand
[1].mode
;
699 mode2
= insn_data
[icode
].operand
[2].mode
;
701 rtx_op1
= expand_normal (vec_oprnd
);
702 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode1
)
703 && mode1
!= VOIDmode
)
704 rtx_op1
= force_reg (mode1
, rtx_op1
);
706 rtx_op2
= expand_normal (shift_oprnd
);
707 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode2
)
708 && mode2
!= VOIDmode
)
709 rtx_op2
= force_reg (mode2
, rtx_op2
);
712 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
713 target
= gen_reg_rtx (mode
);
715 /* Emit instruction */
716 pat
= GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
);
723 /* This subroutine of expand_doubleword_shift handles the cases in which
724 the effective shift value is >= BITS_PER_WORD. The arguments and return
725 value are the same as for the parent routine, except that SUPERWORD_OP1
726 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
727 INTO_TARGET may be null if the caller has decided to calculate it. */
730 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
731 rtx outof_target
, rtx into_target
,
732 int unsignedp
, enum optab_methods methods
)
734 if (into_target
!= 0)
735 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
736 into_target
, unsignedp
, methods
))
739 if (outof_target
!= 0)
741 /* For a signed right shift, we must fill OUTOF_TARGET with copies
742 of the sign bit, otherwise we must fill it with zeros. */
743 if (binoptab
!= ashr_optab
)
744 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
746 if (!force_expand_binop (word_mode
, binoptab
,
747 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
748 outof_target
, unsignedp
, methods
))
754 /* This subroutine of expand_doubleword_shift handles the cases in which
755 the effective shift value is < BITS_PER_WORD. The arguments and return
756 value are the same as for the parent routine. */
759 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
760 rtx outof_input
, rtx into_input
, rtx op1
,
761 rtx outof_target
, rtx into_target
,
762 int unsignedp
, enum optab_methods methods
,
763 unsigned HOST_WIDE_INT shift_mask
)
765 optab reverse_unsigned_shift
, unsigned_shift
;
768 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
769 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
771 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
772 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
773 the opposite direction to BINOPTAB. */
774 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
776 carries
= outof_input
;
777 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
778 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
783 /* We must avoid shifting by BITS_PER_WORD bits since that is either
784 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
785 has unknown behavior. Do a single shift first, then shift by the
786 remainder. It's OK to use ~OP1 as the remainder if shift counts
787 are truncated to the mode size. */
788 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
789 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
790 if (shift_mask
== BITS_PER_WORD
- 1)
792 tmp
= immed_double_const (-1, -1, op1_mode
);
793 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
798 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
799 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
803 if (tmp
== 0 || carries
== 0)
805 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
806 carries
, tmp
, 0, unsignedp
, methods
);
810 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
811 so the result can go directly into INTO_TARGET if convenient. */
812 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
813 into_target
, unsignedp
, methods
);
817 /* Now OR in the bits carried over from OUTOF_INPUT. */
818 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
819 into_target
, unsignedp
, methods
))
822 /* Use a standard word_mode shift for the out-of half. */
823 if (outof_target
!= 0)
824 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
825 outof_target
, unsignedp
, methods
))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
894 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
895 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
896 input operand; the shift moves bits in the direction OUTOF_INPUT->
897 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
898 of the target. OP1 is the shift count and OP1_MODE is its mode.
899 If OP1 is constant, it will have been truncated as appropriate
900 and is known to be nonzero.
902 If SHIFT_MASK is zero, the result of word shifts is undefined when the
903 shift count is outside the range [0, BITS_PER_WORD). This routine must
904 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
906 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
907 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
908 fill with zeros or sign bits as appropriate.
910 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
911 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
912 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
913 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
916 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
917 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
918 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
919 function wants to calculate it itself.
921 Return true if the shift could be successfully synthesized. */
924 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
925 rtx outof_input
, rtx into_input
, rtx op1
,
926 rtx outof_target
, rtx into_target
,
927 int unsignedp
, enum optab_methods methods
,
928 unsigned HOST_WIDE_INT shift_mask
)
930 rtx superword_op1
, tmp
, cmp1
, cmp2
;
931 rtx subword_label
, done_label
;
932 enum rtx_code cmp_code
;
934 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
935 fill the result with sign or zero bits as appropriate. If so, the value
936 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
937 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
938 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
940 This isn't worthwhile for constant shifts since the optimizers will
941 cope better with in-range shift counts. */
942 if (shift_mask
>= BITS_PER_WORD
944 && !CONSTANT_P (op1
))
946 if (!expand_doubleword_shift (op1_mode
, binoptab
,
947 outof_input
, into_input
, op1
,
949 unsignedp
, methods
, shift_mask
))
951 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
952 outof_target
, unsignedp
, methods
))
957 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
958 is true when the effective shift value is less than BITS_PER_WORD.
959 Set SUPERWORD_OP1 to the shift count that should be used to shift
960 OUTOF_INPUT into INTO_TARGET when the condition is false. */
961 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
962 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
964 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
965 is a subword shift count. */
966 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
968 cmp2
= CONST0_RTX (op1_mode
);
974 /* Set CMP1 to OP1 - BITS_PER_WORD. */
975 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
977 cmp2
= CONST0_RTX (op1_mode
);
979 superword_op1
= cmp1
;
984 /* If we can compute the condition at compile time, pick the
985 appropriate subroutine. */
986 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
987 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
989 if (tmp
== const0_rtx
)
990 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
991 outof_target
, into_target
,
994 return expand_subword_shift (op1_mode
, binoptab
,
995 outof_input
, into_input
, op1
,
996 outof_target
, into_target
,
997 unsignedp
, methods
, shift_mask
);
1000 #ifdef HAVE_conditional_move
1001 /* Try using conditional moves to generate straight-line code. */
1003 rtx start
= get_last_insn ();
1004 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
1005 cmp_code
, cmp1
, cmp2
,
1006 outof_input
, into_input
,
1008 outof_target
, into_target
,
1009 unsignedp
, methods
, shift_mask
))
1011 delete_insns_since (start
);
1015 /* As a last resort, use branches to select the correct alternative. */
1016 subword_label
= gen_label_rtx ();
1017 done_label
= gen_label_rtx ();
1020 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
1021 0, 0, subword_label
);
1024 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
1025 outof_target
, into_target
,
1026 unsignedp
, methods
))
1029 emit_jump_insn (gen_jump (done_label
));
1031 emit_label (subword_label
);
1033 if (!expand_subword_shift (op1_mode
, binoptab
,
1034 outof_input
, into_input
, op1
,
1035 outof_target
, into_target
,
1036 unsignedp
, methods
, shift_mask
))
1039 emit_label (done_label
);
1043 /* Subroutine of expand_binop. Perform a double word multiplication of
1044 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1045 as the target's word_mode. This function return NULL_RTX if anything
1046 goes wrong, in which case it may have already emitted instructions
1047 which need to be deleted.
1049 If we want to multiply two two-word values and have normal and widening
1050 multiplies of single-word values, we can do this with three smaller
1051 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1052 because we are not operating on one word at a time.
1054 The multiplication proceeds as follows:
1055 _______________________
1056 [__op0_high_|__op0_low__]
1057 _______________________
1058 * [__op1_high_|__op1_low__]
1059 _______________________________________________
1060 _______________________
1061 (1) [__op0_low__*__op1_low__]
1062 _______________________
1063 (2a) [__op0_low__*__op1_high_]
1064 _______________________
1065 (2b) [__op0_high_*__op1_low__]
1066 _______________________
1067 (3) [__op0_high_*__op1_high_]
1070 This gives a 4-word result. Since we are only interested in the
1071 lower 2 words, partial result (3) and the upper words of (2a) and
1072 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1073 calculated using non-widening multiplication.
1075 (1), however, needs to be calculated with an unsigned widening
1076 multiplication. If this operation is not directly supported we
1077 try using a signed widening multiplication and adjust the result.
1078 This adjustment works as follows:
1080 If both operands are positive then no adjustment is needed.
1082 If the operands have different signs, for example op0_low < 0 and
1083 op1_low >= 0, the instruction treats the most significant bit of
1084 op0_low as a sign bit instead of a bit with significance
1085 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1086 with 2**BITS_PER_WORD - op0_low, and two's complements the
1087 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1090 Similarly, if both operands are negative, we need to add
1091 (op0_low + op1_low) * 2**BITS_PER_WORD.
1093 We use a trick to adjust quickly. We logically shift op0_low right
1094 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1095 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1096 logical shift exists, we do an arithmetic right shift and subtract
1100 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
1101 bool umulp
, enum optab_methods methods
)
1103 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1104 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1105 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
1106 rtx product
, adjust
, product_high
, temp
;
1108 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1109 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1110 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1111 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1113 /* If we're using an unsigned multiply to directly compute the product
1114 of the low-order words of the operands and perform any required
1115 adjustments of the operands, we begin by trying two more multiplications
1116 and then computing the appropriate sum.
1118 We have checked above that the required addition is provided.
1119 Full-word addition will normally always succeed, especially if
1120 it is provided at all, so we don't worry about its failure. The
1121 multiplication may well fail, however, so we do handle that. */
1125 /* ??? This could be done with emit_store_flag where available. */
1126 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1127 NULL_RTX
, 1, methods
);
1129 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
1130 NULL_RTX
, 0, OPTAB_DIRECT
);
1133 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1134 NULL_RTX
, 0, methods
);
1137 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
1138 NULL_RTX
, 0, OPTAB_DIRECT
);
1145 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
1146 NULL_RTX
, 0, OPTAB_DIRECT
);
1150 /* OP0_HIGH should now be dead. */
1154 /* ??? This could be done with emit_store_flag where available. */
1155 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1156 NULL_RTX
, 1, methods
);
1158 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
1159 NULL_RTX
, 0, OPTAB_DIRECT
);
1162 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1163 NULL_RTX
, 0, methods
);
1166 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
1167 NULL_RTX
, 0, OPTAB_DIRECT
);
1174 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
1175 NULL_RTX
, 0, OPTAB_DIRECT
);
1179 /* OP1_HIGH should now be dead. */
1181 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
1182 adjust
, 0, OPTAB_DIRECT
);
1184 if (target
&& !REG_P (target
))
1188 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1189 target
, 1, OPTAB_DIRECT
);
1191 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1192 target
, 1, OPTAB_DIRECT
);
1197 product_high
= operand_subword (product
, high
, 1, mode
);
1198 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
1199 REG_P (product_high
) ? product_high
: adjust
,
1201 emit_move_insn (product_high
, adjust
);
1205 /* Wrapper around expand_binop which takes an rtx code to specify
1206 the operation to perform, not an optab pointer. All other
1207 arguments are the same. */
1209 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
1210 rtx op1
, rtx target
, int unsignedp
,
1211 enum optab_methods methods
)
1213 optab binop
= code_to_optab
[(int) code
];
1216 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
1219 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1220 binop. Order them according to commutative_operand_precedence and, if
1221 possible, try to put TARGET or a pseudo first. */
1223 swap_commutative_operands_with_target (rtx target
, rtx op0
, rtx op1
)
1225 int op0_prec
= commutative_operand_precedence (op0
);
1226 int op1_prec
= commutative_operand_precedence (op1
);
1228 if (op0_prec
< op1_prec
)
1231 if (op0_prec
> op1_prec
)
1234 /* With equal precedence, both orders are ok, but it is better if the
1235 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1236 if (target
== 0 || REG_P (target
))
1237 return (REG_P (op1
) && !REG_P (op0
)) || target
== op1
;
1239 return rtx_equal_p (op1
, target
);
1243 /* Generate code to perform an operation specified by BINOPTAB
1244 on operands OP0 and OP1, with result having machine-mode MODE.
1246 UNSIGNEDP is for the case where we have to widen the operands
1247 to perform the operation. It says to use zero-extension.
1249 If TARGET is nonzero, the value
1250 is generated there, if it is convenient to do so.
1251 In all cases an rtx is returned for the locus of the value;
1252 this may or may not be TARGET. */
1255 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1256 rtx target
, int unsignedp
, enum optab_methods methods
)
/* NOTE(review): this region is a lossy extraction -- each statement is split
   across lines and several original lines (visible as gaps in the embedded
   numbering) are missing.  The code fragments below are preserved verbatim;
   only review notes have been added.  */
1258 enum optab_methods next_methods
1259 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1260 ? OPTAB_WIDEN
: methods
);
1261 enum mode_class
class;
1262 enum machine_mode wider_mode
;
1264 int commutative_op
= 0;
1265 int shift_op
= (binoptab
->code
== ASHIFT
1266 || binoptab
->code
== ASHIFTRT
1267 || binoptab
->code
== LSHIFTRT
1268 || binoptab
->code
== ROTATE
1269 || binoptab
->code
== ROTATERT
);
1270 rtx entry_last
= get_last_insn ();
1272 bool first_pass_p
= true;
1274 class = GET_MODE_CLASS (mode
);
1276 /* If subtracting an integer constant, convert this into an addition of
1277 the negated constant. */
1279 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
1281 op1
= negate_rtx (mode
, op1
);
1282 binoptab
= add_optab
;
1285 /* If we are inside an appropriately-short loop and we are optimizing,
1286 force expensive constants into a register. */
1287 if (CONSTANT_P (op0
) && optimize
1288 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1290 if (GET_MODE (op0
) != VOIDmode
)
1291 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
1292 op0
= force_reg (mode
, op0
);
1295 if (CONSTANT_P (op1
) && optimize
1296 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1298 if (GET_MODE (op1
) != VOIDmode
)
1299 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1300 op1
= force_reg (mode
, op1
);
1303 /* Record where to delete back to if we backtrack. */
1304 last
= get_last_insn ();
1306 /* If operation is commutative,
1307 try to make the first operand a register.
1308 Even better, try to make it the same as the target.
1309 Also try to make the last operand a constant. */
1310 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1311 || binoptab
== smul_widen_optab
1312 || binoptab
== umul_widen_optab
1313 || binoptab
== smul_highpart_optab
1314 || binoptab
== umul_highpart_optab
)
1318 if (swap_commutative_operands_with_target (target
, op0
, op1
))
1328 /* If we can do it with a three-operand insn, do so. */
1330 if (methods
!= OPTAB_MUST_WIDEN
1331 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1333 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1334 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1335 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1336 enum machine_mode tmp_mode
;
1338 rtx xop0
= op0
, xop1
= op1
;
1343 temp
= gen_reg_rtx (mode
);
1345 /* If it is a commutative operator and the modes would match
1346 if we would swap the operands, we can save the conversions. */
1349 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1350 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1354 tmp
= op0
; op0
= op1
; op1
= tmp
;
1355 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1359 /* In case the insn wants input operands in modes different from
1360 those of the actual operands, convert the operands. It would
1361 seem that we don't need to convert CONST_INTs, but we do, so
1362 that they're properly zero-extended, sign-extended or truncated
1365 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1366 xop0
= convert_modes (mode0
,
1367 GET_MODE (op0
) != VOIDmode
1372 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1373 xop1
= convert_modes (mode1
,
1374 GET_MODE (op1
) != VOIDmode
1379 /* Now, if insn's predicates don't allow our operands, put them into
1382 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
1383 && mode0
!= VOIDmode
)
1384 xop0
= copy_to_mode_reg (mode0
, xop0
);
1386 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
1387 && mode1
!= VOIDmode
)
1388 xop1
= copy_to_mode_reg (mode1
, xop1
);
1390 if (binoptab
== vec_pack_trunc_optab
1391 || binoptab
== vec_pack_usat_optab
1392 || binoptab
== vec_pack_ssat_optab
1393 || binoptab
== vec_pack_ufix_trunc_optab
1394 || binoptab
== vec_pack_sfix_trunc_optab
)
1396 /* The mode of the result is different then the mode of the
1398 tmp_mode
= insn_data
[icode
].operand
[0].mode
;
1399 if (GET_MODE_NUNITS (tmp_mode
) != 2 * GET_MODE_NUNITS (mode
))
1405 if (!insn_data
[icode
].operand
[0].predicate (temp
, tmp_mode
))
1406 temp
= gen_reg_rtx (tmp_mode
);
1408 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1411 /* If PAT is composed of more than one insn, try to add an appropriate
1412 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1413 operand, call ourselves again, this time without a target. */
1414 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1415 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1417 delete_insns_since (last
);
1418 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1419 unsignedp
, methods
);
1426 delete_insns_since (last
);
/* NOTE(review): the guarding 'if (first_pass_p ...' line (orig. 1432) is
   absent from this extraction.  */
1429 /* If we were trying to rotate by a constant value, and that didn't
1430 work, try rotating the other direction before falling back to
1431 shifts and bitwise-or. */
1433 && (binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1434 && class == MODE_INT
1435 && GET_CODE (op1
) == CONST_INT
1437 && (unsigned int) INTVAL (op1
) < GET_MODE_BITSIZE (mode
))
1439 first_pass_p
= false;
1440 op1
= GEN_INT (GET_MODE_BITSIZE (mode
) - INTVAL (op1
));
1441 binoptab
= binoptab
== rotl_optab
? rotr_optab
: rotl_optab
;
1445 /* If this is a multiply, see if we can do a widening operation that
1446 takes operands of this mode and makes a wider mode. */
1448 if (binoptab
== smul_optab
1449 && GET_MODE_WIDER_MODE (mode
) != VOIDmode
1450 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1451 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1452 != CODE_FOR_nothing
))
1454 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1455 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1456 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1460 if (GET_MODE_CLASS (mode
) == MODE_INT
1461 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1462 GET_MODE_BITSIZE (GET_MODE (temp
))))
1463 return gen_lowpart (mode
, temp
);
1465 return convert_to_mode (mode
, temp
, unsignedp
);
1469 /* Look for a wider mode of the same class for which we think we
1470 can open-code the operation. Check for a widening multiply at the
1471 wider mode as well. */
1473 if (CLASS_HAS_WIDER_MODES_P (class)
1474 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1475 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1476 wider_mode
!= VOIDmode
;
1477 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1479 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1480 || (binoptab
== smul_optab
1481 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1482 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1483 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1484 != CODE_FOR_nothing
)))
1486 rtx xop0
= op0
, xop1
= op1
;
1489 /* For certain integer operations, we need not actually extend
1490 the narrow operands, as long as we will truncate
1491 the results to the same narrowness. */
1493 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1494 || binoptab
== xor_optab
1495 || binoptab
== add_optab
|| binoptab
== sub_optab
1496 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1497 && class == MODE_INT
)
1500 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1502 /* The second operand of a shift must always be extended. */
1503 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1504 no_extend
&& binoptab
!= ashl_optab
);
1506 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1507 unsignedp
, OPTAB_DIRECT
);
1510 if (class != MODE_INT
1511 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1512 GET_MODE_BITSIZE (wider_mode
)))
1515 target
= gen_reg_rtx (mode
);
1516 convert_move (target
, temp
, 0);
1520 return gen_lowpart (mode
, temp
);
1523 delete_insns_since (last
);
1527 /* These can be done a word at a time. */
1528 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1529 && class == MODE_INT
1530 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1531 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1537 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1538 won't be accurate, so use a new target. */
1539 if (target
== 0 || target
== op0
|| target
== op1
)
1540 target
= gen_reg_rtx (mode
);
1544 /* Do the actual arithmetic. */
1545 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1547 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1548 rtx x
= expand_binop (word_mode
, binoptab
,
1549 operand_subword_force (op0
, i
, mode
),
1550 operand_subword_force (op1
, i
, mode
),
1551 target_piece
, unsignedp
, next_methods
);
1556 if (target_piece
!= x
)
1557 emit_move_insn (target_piece
, x
);
1560 insns
= get_insns ();
1563 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1565 if (binoptab
->code
!= UNKNOWN
)
1567 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1568 copy_rtx (op0
), copy_rtx (op1
));
1572 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1577 /* Synthesize double word shifts from single word shifts. */
1578 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1579 || binoptab
== ashr_optab
)
1580 && class == MODE_INT
1581 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1582 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1583 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1584 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1585 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1587 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1588 enum machine_mode op1_mode
;
1590 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1591 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1592 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1594 /* Apply the truncation to constant shifts. */
1595 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1596 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1598 if (op1
== CONST0_RTX (op1_mode
))
1601 /* Make sure that this is a combination that expand_doubleword_shift
1602 can handle. See the comments there for details. */
1603 if (double_shift_mask
== 0
1604 || (shift_mask
== BITS_PER_WORD
- 1
1605 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1607 rtx insns
, equiv_value
;
1608 rtx into_target
, outof_target
;
1609 rtx into_input
, outof_input
;
1610 int left_shift
, outof_word
;
1612 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1613 won't be accurate, so use a new target. */
1614 if (target
== 0 || target
== op0
|| target
== op1
)
1615 target
= gen_reg_rtx (mode
);
1619 /* OUTOF_* is the word we are shifting bits away from, and
1620 INTO_* is the word that we are shifting bits towards, thus
1621 they differ depending on the direction of the shift and
1622 WORDS_BIG_ENDIAN. */
1624 left_shift
= binoptab
== ashl_optab
;
1625 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1627 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1628 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1630 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1631 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1633 if (expand_doubleword_shift (op1_mode
, binoptab
,
1634 outof_input
, into_input
, op1
,
1635 outof_target
, into_target
,
1636 unsignedp
, next_methods
, shift_mask
))
1638 insns
= get_insns ();
1641 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1642 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1649 /* Synthesize double word rotates from single word shifts. */
1650 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1651 && class == MODE_INT
1652 && GET_CODE (op1
) == CONST_INT
1653 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1654 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1655 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1658 rtx into_target
, outof_target
;
1659 rtx into_input
, outof_input
;
1661 int shift_count
, left_shift
, outof_word
;
1663 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1664 won't be accurate, so use a new target. Do this also if target is not
1665 a REG, first because having a register instead may open optimization
1666 opportunities, and second because if target and op0 happen to be MEMs
1667 designating the same location, we would risk clobbering it too early
1668 in the code sequence we generate below. */
1669 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1670 target
= gen_reg_rtx (mode
);
1674 shift_count
= INTVAL (op1
);
1676 /* OUTOF_* is the word we are shifting bits away from, and
1677 INTO_* is the word that we are shifting bits towards, thus
1678 they differ depending on the direction of the shift and
1679 WORDS_BIG_ENDIAN. */
1681 left_shift
= (binoptab
== rotl_optab
);
1682 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1684 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1685 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1687 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1688 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1690 if (shift_count
== BITS_PER_WORD
)
1692 /* This is just a word swap. */
1693 emit_move_insn (outof_target
, into_input
);
1694 emit_move_insn (into_target
, outof_input
);
1699 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1700 rtx first_shift_count
, second_shift_count
;
1701 optab reverse_unsigned_shift
, unsigned_shift
;
1703 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1704 ? lshr_optab
: ashl_optab
);
1706 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1707 ? ashl_optab
: lshr_optab
);
1709 if (shift_count
> BITS_PER_WORD
)
1711 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1712 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1716 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1717 second_shift_count
= GEN_INT (shift_count
);
1720 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1721 outof_input
, first_shift_count
,
1722 NULL_RTX
, unsignedp
, next_methods
);
1723 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1724 into_input
, second_shift_count
,
1725 NULL_RTX
, unsignedp
, next_methods
);
1727 if (into_temp1
!= 0 && into_temp2
!= 0)
1728 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1729 into_target
, unsignedp
, next_methods
);
1733 if (inter
!= 0 && inter
!= into_target
)
1734 emit_move_insn (into_target
, inter
);
1736 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1737 into_input
, first_shift_count
,
1738 NULL_RTX
, unsignedp
, next_methods
);
1739 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1740 outof_input
, second_shift_count
,
1741 NULL_RTX
, unsignedp
, next_methods
);
1743 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1744 inter
= expand_binop (word_mode
, ior_optab
,
1745 outof_temp1
, outof_temp2
,
1746 outof_target
, unsignedp
, next_methods
);
1748 if (inter
!= 0 && inter
!= outof_target
)
1749 emit_move_insn (outof_target
, inter
);
1752 insns
= get_insns ();
1757 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1758 block to help the register allocator a bit. But a multi-word
1759 rotate will need all the input bits when setting the output
1760 bits, so there clearly is a conflict between the input and
1761 output registers. So we can't use a no-conflict block here. */
1767 /* These can be done a word at a time by propagating carries. */
1768 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1769 && class == MODE_INT
1770 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1771 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1774 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1775 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1776 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1777 rtx xop0
, xop1
, xtarget
;
/* Carry values: STORE_FLAG_VALUE is used when it is +/-1; otherwise 1
   (the corresponding #else branch, orig. 1784-1786, is missing here).  */
1779 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1780 value is one of those, use it. Otherwise, use 1 since it is the
1781 one easiest to get. */
1782 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1783 int normalizep
= STORE_FLAG_VALUE
;
1788 /* Prepare the operands. */
1789 xop0
= force_reg (mode
, op0
);
1790 xop1
= force_reg (mode
, op1
);
1792 xtarget
= gen_reg_rtx (mode
);
1794 if (target
== 0 || !REG_P (target
))
1797 /* Indicate for flow that the entire target reg is being set. */
1799 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1801 /* Do the actual arithmetic. */
1802 for (i
= 0; i
< nwords
; i
++)
1804 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1805 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1806 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1807 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1810 /* Main add/subtract of the input operands. */
1811 x
= expand_binop (word_mode
, binoptab
,
1812 op0_piece
, op1_piece
,
1813 target_piece
, unsignedp
, next_methods
);
1819 /* Store carry from main add/subtract. */
1820 carry_out
= gen_reg_rtx (word_mode
);
1821 carry_out
= emit_store_flag_force (carry_out
,
1822 (binoptab
== add_optab
1825 word_mode
, 1, normalizep
);
1832 /* Add/subtract previous carry to main result. */
1833 newx
= expand_binop (word_mode
,
1834 normalizep
== 1 ? binoptab
: otheroptab
,
1836 NULL_RTX
, 1, next_methods
);
1840 /* Get out carry from adding/subtracting carry in. */
1841 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1842 carry_tmp
= emit_store_flag_force (carry_tmp
,
1843 (binoptab
== add_optab
1846 word_mode
, 1, normalizep
);
1848 /* Logical-ior the two poss. carry together. */
1849 carry_out
= expand_binop (word_mode
, ior_optab
,
1850 carry_out
, carry_tmp
,
1851 carry_out
, 0, next_methods
);
1855 emit_move_insn (target_piece
, newx
);
1859 if (x
!= target_piece
)
1860 emit_move_insn (target_piece
, x
);
1863 carry_in
= carry_out
;
1866 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1868 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1869 || ! rtx_equal_p (target
, xtarget
))
1871 rtx temp
= emit_move_insn (target
, xtarget
);
1873 set_unique_reg_note (temp
,
1875 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1886 delete_insns_since (last
);
1889 /* Attempt to synthesize double word multiplies using a sequence of word
1890 mode multiplications. We first attempt to generate a sequence using a
1891 more efficient unsigned widening multiply, and if that fails we then
1892 try using a signed widening multiply. */
1894 if (binoptab
== smul_optab
1895 && class == MODE_INT
1896 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1897 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1898 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1900 rtx product
= NULL_RTX
;
1902 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1903 != CODE_FOR_nothing
)
1905 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1908 delete_insns_since (last
);
1911 if (product
== NULL_RTX
1912 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1913 != CODE_FOR_nothing
)
1915 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1918 delete_insns_since (last
);
1921 if (product
!= NULL_RTX
)
1923 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1925 temp
= emit_move_insn (target
? target
: product
, product
);
1926 set_unique_reg_note (temp
,
1928 gen_rtx_fmt_ee (MULT
, mode
,
1936 /* It can't be open-coded in this mode.
1937 Use a library call if one is available and caller says that's ok. */
1939 if (binoptab
->handlers
[(int) mode
].libfunc
1940 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1944 enum machine_mode op1_mode
= mode
;
1951 op1_mode
= word_mode
;
1952 /* Specify unsigned here,
1953 since negative shift counts are meaningless. */
1954 op1x
= convert_to_mode (word_mode
, op1
, 1);
1957 if (GET_MODE (op0
) != VOIDmode
1958 && GET_MODE (op0
) != mode
)
1959 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1961 /* Pass 1 for NO_QUEUE so we don't lose any increments
1962 if the libcall is cse'd or moved. */
1963 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1964 NULL_RTX
, LCT_CONST
, mode
, 2,
1965 op0
, mode
, op1x
, op1_mode
);
1967 insns
= get_insns ();
1970 target
= gen_reg_rtx (mode
);
1971 emit_libcall_block (insns
, target
, value
,
1972 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1977 delete_insns_since (last
);
1979 /* It can't be done in this mode. Can we do it in a wider mode? */
1981 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1982 || methods
== OPTAB_MUST_WIDEN
))
1984 /* Caller says, don't even try. */
1985 delete_insns_since (entry_last
);
1989 /* Compute the value of METHODS to pass to recursive calls.
1990 Don't allow widening to be tried recursively. */
1992 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1994 /* Look for a wider mode of the same class for which it appears we can do
1997 if (CLASS_HAS_WIDER_MODES_P (class))
1999 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2000 wider_mode
!= VOIDmode
;
2001 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2003 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
2004 != CODE_FOR_nothing
)
2005 || (methods
== OPTAB_LIB
2006 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
2008 rtx xop0
= op0
, xop1
= op1
;
2011 /* For certain integer operations, we need not actually extend
2012 the narrow operands, as long as we will truncate
2013 the results to the same narrowness. */
2015 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2016 || binoptab
== xor_optab
2017 || binoptab
== add_optab
|| binoptab
== sub_optab
2018 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2019 && class == MODE_INT
)
2022 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2023 unsignedp
, no_extend
);
2025 /* The second operand of a shift must always be extended. */
2026 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2027 no_extend
&& binoptab
!= ashl_optab
);
2029 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2030 unsignedp
, methods
);
2033 if (class != MODE_INT
2034 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2035 GET_MODE_BITSIZE (wider_mode
)))
2038 target
= gen_reg_rtx (mode
);
2039 convert_move (target
, temp
, 0);
2043 return gen_lowpart (mode
, temp
);
2046 delete_insns_since (last
);
2051 delete_insns_since (entry_last
);
2055 /* Expand a binary operator which has both signed and unsigned forms.
2056 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2059 If we widen unsigned operands, we may use a signed wider operation instead
2060 of an unsigned wider operation, since the result would be the same. */
2063 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2064 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2065 enum optab_methods methods
)
2068 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2069 struct optab wide_soptab
;
2071 /* Do it without widening, if possible. */
2072 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2073 unsignedp
, OPTAB_DIRECT
);
2074 if (temp
|| methods
== OPTAB_DIRECT
)
2077 /* Try widening to a signed int. Make a fake signed optab that
2078 hides any signed insn for direct use. */
2079 wide_soptab
= *soptab
;
2080 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2081 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2083 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2084 unsignedp
, OPTAB_WIDEN
);
2086 /* For unsigned operands, try widening to an unsigned int. */
2087 if (temp
== 0 && unsignedp
)
2088 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2089 unsignedp
, OPTAB_WIDEN
);
2090 if (temp
|| methods
== OPTAB_WIDEN
)
2093 /* Use the right width lib call if that exists. */
2094 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2095 if (temp
|| methods
== OPTAB_LIB
)
2098 /* Must widen and use a lib call, use either signed or unsigned. */
2099 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2100 unsignedp
, methods
);
2104 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2105 unsignedp
, methods
);
2109 /* Generate code to perform an operation specified by UNOPPTAB
2110 on operand OP0, with two results to TARG0 and TARG1.
2111 We assume that the order of the operands for the instruction
2112 is TARG0, TARG1, OP0.
2114 Either TARG0 or TARG1 may be zero, but what that means is that
2115 the result is not actually wanted. We will generate it into
2116 a dummy pseudo-reg and discard it. They may not both be zero.
2118 Returns 1 if this operation can be performed; 0 if not. */
2121 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2124 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2125 enum mode_class
class;
2126 enum machine_mode wider_mode
;
2127 rtx entry_last
= get_last_insn ();
2130 class = GET_MODE_CLASS (mode
);
2133 targ0
= gen_reg_rtx (mode
);
2135 targ1
= gen_reg_rtx (mode
);
2137 /* Record where to go back to if we fail. */
2138 last
= get_last_insn ();
2140 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2142 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2143 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
2147 if (GET_MODE (xop0
) != VOIDmode
2148 && GET_MODE (xop0
) != mode0
)
2149 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2151 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2152 if (!insn_data
[icode
].operand
[2].predicate (xop0
, mode0
))
2153 xop0
= copy_to_mode_reg (mode0
, xop0
);
2155 /* We could handle this, but we should always be called with a pseudo
2156 for our targets and all insns should take them as outputs. */
2157 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2158 gcc_assert (insn_data
[icode
].operand
[1].predicate (targ1
, mode
));
2160 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
2167 delete_insns_since (last
);
2170 /* It can't be done in this mode. Can we do it in a wider mode? */
2172 if (CLASS_HAS_WIDER_MODES_P (class))
2174 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2175 wider_mode
!= VOIDmode
;
2176 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2178 if (unoptab
->handlers
[(int) wider_mode
].insn_code
2179 != CODE_FOR_nothing
)
2181 rtx t0
= gen_reg_rtx (wider_mode
);
2182 rtx t1
= gen_reg_rtx (wider_mode
);
2183 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2185 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2187 convert_move (targ0
, t0
, unsignedp
);
2188 convert_move (targ1
, t1
, unsignedp
);
2192 delete_insns_since (last
);
2197 delete_insns_since (entry_last
);
2201 /* Generate code to perform an operation specified by BINOPTAB
2202 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2203 We assume that the order of the operands for the instruction
2204 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2205 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2207 Either TARG0 or TARG1 may be zero, but what that means is that
2208 the result is not actually wanted. We will generate it into
2209 a dummy pseudo-reg and discard it. They may not both be zero.
2211 Returns 1 if this operation can be performed; 0 if not. */
2214 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2217 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2218 enum mode_class
class;
2219 enum machine_mode wider_mode
;
2220 rtx entry_last
= get_last_insn ();
2223 class = GET_MODE_CLASS (mode
);
2225 /* If we are inside an appropriately-short loop and we are optimizing,
2226 force expensive constants into a register. */
2227 if (CONSTANT_P (op0
) && optimize
2228 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2229 op0
= force_reg (mode
, op0
);
2231 if (CONSTANT_P (op1
) && optimize
2232 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2233 op1
= force_reg (mode
, op1
);
2236 targ0
= gen_reg_rtx (mode
);
2238 targ1
= gen_reg_rtx (mode
);
2240 /* Record where to go back to if we fail. */
2241 last
= get_last_insn ();
2243 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2245 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2246 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2247 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2249 rtx xop0
= op0
, xop1
= op1
;
2251 /* In case the insn wants input operands in modes different from
2252 those of the actual operands, convert the operands. It would
2253 seem that we don't need to convert CONST_INTs, but we do, so
2254 that they're properly zero-extended, sign-extended or truncated
2257 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2258 xop0
= convert_modes (mode0
,
2259 GET_MODE (op0
) != VOIDmode
2264 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2265 xop1
= convert_modes (mode1
,
2266 GET_MODE (op1
) != VOIDmode
2271 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2272 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2273 xop0
= copy_to_mode_reg (mode0
, xop0
);
2275 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
))
2276 xop1
= copy_to_mode_reg (mode1
, xop1
);
2278 /* We could handle this, but we should always be called with a pseudo
2279 for our targets and all insns should take them as outputs. */
2280 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2281 gcc_assert (insn_data
[icode
].operand
[3].predicate (targ1
, mode
));
2283 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2290 delete_insns_since (last
);
2293 /* It can't be done in this mode. Can we do it in a wider mode? */
2295 if (CLASS_HAS_WIDER_MODES_P (class))
2297 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2298 wider_mode
!= VOIDmode
;
2299 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2301 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2302 != CODE_FOR_nothing
)
2304 rtx t0
= gen_reg_rtx (wider_mode
);
2305 rtx t1
= gen_reg_rtx (wider_mode
);
2306 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2307 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2309 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2312 convert_move (targ0
, t0
, unsignedp
);
2313 convert_move (targ1
, t1
, unsignedp
);
2317 delete_insns_since (last
);
2322 delete_insns_since (entry_last
);
2326 /* Expand the two-valued library call indicated by BINOPTAB, but
2327 preserve only one of the values. If TARG0 is non-NULL, the first
2328 value is placed into TARG0; otherwise the second value is placed
2329 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2330 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2331 This routine assumes that the value returned by the library call is
2332 as if the return value was of an integral mode twice as wide as the
2333 mode of OP0. Returns 1 if the call was successful. */
2336 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2337 rtx targ0
, rtx targ1
, enum rtx_code code
)
2339 enum machine_mode mode
;
2340 enum machine_mode libval_mode
;
2344 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2345 gcc_assert (!targ0
!= !targ1
);
2347 mode
= GET_MODE (op0
);
2348 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2351 /* The value returned by the library function will have twice as
2352 many bits as the nominal MODE. */
2353 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2356 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2357 NULL_RTX
, LCT_CONST
,
2361 /* Get the part of VAL containing the value that we want. */
2362 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2363 targ0
? 0 : GET_MODE_SIZE (mode
));
2364 insns
= get_insns ();
2366 /* Move the into the desired location. */
2367 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2368 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2374 /* Wrapper around expand_unop which takes an rtx code to specify
2375 the operation to perform, not an optab pointer. All other
2376 arguments are the same. */
2378 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2379 rtx target
, int unsignedp
)
2381 optab unop
= code_to_optab
[(int) code
];
2384 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2390 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2392 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2394 enum mode_class
class = GET_MODE_CLASS (mode
);
2395 if (CLASS_HAS_WIDER_MODES_P (class))
2397 enum machine_mode wider_mode
;
2398 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2399 wider_mode
!= VOIDmode
;
2400 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2402 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2403 != CODE_FOR_nothing
)
2405 rtx xop0
, temp
, last
;
2407 last
= get_last_insn ();
2410 target
= gen_reg_rtx (mode
);
2411 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2412 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2414 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2415 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2416 - GET_MODE_BITSIZE (mode
)),
2417 target
, true, OPTAB_DIRECT
);
2419 delete_insns_since (last
);
2431 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2433 widen_bswap (enum machine_mode mode
, rtx op0
, rtx target
)
2435 enum mode_class
class = GET_MODE_CLASS (mode
);
2436 enum machine_mode wider_mode
;
2439 if (!CLASS_HAS_WIDER_MODES_P (class))
2442 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2443 wider_mode
!= VOIDmode
;
2444 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2445 if (bswap_optab
->handlers
[wider_mode
].insn_code
!= CODE_FOR_nothing
)
2450 last
= get_last_insn ();
2452 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2453 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2456 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2457 size_int (GET_MODE_BITSIZE (wider_mode
)
2458 - GET_MODE_BITSIZE (mode
)),
2464 target
= gen_reg_rtx (mode
);
2465 emit_move_insn (target
, gen_lowpart (mode
, x
));
2468 delete_insns_since (last
);
2473 /* Try calculating bswap as two bswaps of two word-sized operands. */
2476 expand_doubleword_bswap (enum machine_mode mode
, rtx op
, rtx target
)
2480 t1
= expand_unop (word_mode
, bswap_optab
,
2481 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2482 t0
= expand_unop (word_mode
, bswap_optab
,
2483 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2486 target
= gen_reg_rtx (mode
);
2488 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2489 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2490 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
2495 /* Try calculating (parity x) as (and (popcount x) 1), where
2496 popcount can also be done in a wider mode. */
2498 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2500 enum mode_class
class = GET_MODE_CLASS (mode
);
2501 if (CLASS_HAS_WIDER_MODES_P (class))
2503 enum machine_mode wider_mode
;
2504 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2505 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2507 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2508 != CODE_FOR_nothing
)
2510 rtx xop0
, temp
, last
;
2512 last
= get_last_insn ();
2515 target
= gen_reg_rtx (mode
);
2516 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2517 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2520 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2521 target
, true, OPTAB_DIRECT
);
2523 delete_insns_since (last
);
2532 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2533 conditions, VAL may already be a SUBREG against which we cannot generate
2534 a further SUBREG. In this case, we expect forcing the value into a
2535 register will work around the situation. */
2538 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2539 enum machine_mode imode
)
2542 ret
= lowpart_subreg (omode
, val
, imode
);
2545 val
= force_reg (imode
, val
);
2546 ret
= lowpart_subreg (omode
, val
, imode
);
2547 gcc_assert (ret
!= NULL
);
2552 /* Expand a floating point absolute value or negation operation via a
2553 logical operation on the sign bit. */
2556 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2557 rtx op0
, rtx target
)
2559 const struct real_format
*fmt
;
2560 int bitpos
, word
, nwords
, i
;
2561 enum machine_mode imode
;
2562 HOST_WIDE_INT hi
, lo
;
2565 /* The format has to have a simple sign bit. */
2566 fmt
= REAL_MODE_FORMAT (mode
);
2570 bitpos
= fmt
->signbit_rw
;
2574 /* Don't create negative zeros if the format doesn't support them. */
2575 if (code
== NEG
&& !fmt
->has_signed_zero
)
2578 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2580 imode
= int_mode_for_mode (mode
);
2581 if (imode
== BLKmode
)
2590 if (FLOAT_WORDS_BIG_ENDIAN
)
2591 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2593 word
= bitpos
/ BITS_PER_WORD
;
2594 bitpos
= bitpos
% BITS_PER_WORD
;
2595 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2598 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2601 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2605 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2611 if (target
== 0 || target
== op0
)
2612 target
= gen_reg_rtx (mode
);
2618 for (i
= 0; i
< nwords
; ++i
)
2620 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2621 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2625 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2627 immed_double_const (lo
, hi
, imode
),
2628 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2629 if (temp
!= targ_piece
)
2630 emit_move_insn (targ_piece
, temp
);
2633 emit_move_insn (targ_piece
, op0_piece
);
2636 insns
= get_insns ();
2639 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2640 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2644 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2645 gen_lowpart (imode
, op0
),
2646 immed_double_const (lo
, hi
, imode
),
2647 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2648 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2650 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2651 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2657 /* Generate code to perform an operation specified by UNOPTAB
2658 on operand OP0, with result having machine-mode MODE.
2660 UNSIGNEDP is for the case where we have to widen the operands
2661 to perform the operation. It says to use zero-extension.
2663 If TARGET is nonzero, the value
2664 is generated there, if it is convenient to do so.
2665 In all cases an rtx is returned for the locus of the value;
2666 this may or may not be TARGET. */
2669 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2672 enum mode_class
class;
2673 enum machine_mode wider_mode
;
2675 rtx last
= get_last_insn ();
2678 class = GET_MODE_CLASS (mode
);
2680 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2682 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2683 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2689 temp
= gen_reg_rtx (mode
);
2691 if (GET_MODE (xop0
) != VOIDmode
2692 && GET_MODE (xop0
) != mode0
)
2693 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2695 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2697 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2698 xop0
= copy_to_mode_reg (mode0
, xop0
);
2700 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
2701 temp
= gen_reg_rtx (mode
);
2703 pat
= GEN_FCN (icode
) (temp
, xop0
);
2706 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2707 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2709 delete_insns_since (last
);
2710 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2718 delete_insns_since (last
);
2721 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2723 /* Widening clz needs special treatment. */
2724 if (unoptab
== clz_optab
)
2726 temp
= widen_clz (mode
, op0
, target
);
2733 /* Widening (or narrowing) bswap needs special treatment. */
2734 if (unoptab
== bswap_optab
)
2736 temp
= widen_bswap (mode
, op0
, target
);
2740 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2741 && unoptab
->handlers
[word_mode
].insn_code
!= CODE_FOR_nothing
)
2743 temp
= expand_doubleword_bswap (mode
, op0
, target
);
2751 if (CLASS_HAS_WIDER_MODES_P (class))
2752 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2753 wider_mode
!= VOIDmode
;
2754 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2756 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2760 /* For certain operations, we need not actually extend
2761 the narrow operand, as long as we will truncate the
2762 results to the same narrowness. */
2764 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2765 (unoptab
== neg_optab
2766 || unoptab
== one_cmpl_optab
)
2767 && class == MODE_INT
);
2769 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2774 if (class != MODE_INT
2775 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2776 GET_MODE_BITSIZE (wider_mode
)))
2779 target
= gen_reg_rtx (mode
);
2780 convert_move (target
, temp
, 0);
2784 return gen_lowpart (mode
, temp
);
2787 delete_insns_since (last
);
2791 /* These can be done a word at a time. */
2792 if (unoptab
== one_cmpl_optab
2793 && class == MODE_INT
2794 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2795 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2800 if (target
== 0 || target
== op0
)
2801 target
= gen_reg_rtx (mode
);
2805 /* Do the actual arithmetic. */
2806 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2808 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2809 rtx x
= expand_unop (word_mode
, unoptab
,
2810 operand_subword_force (op0
, i
, mode
),
2811 target_piece
, unsignedp
);
2813 if (target_piece
!= x
)
2814 emit_move_insn (target_piece
, x
);
2817 insns
= get_insns ();
2820 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2821 gen_rtx_fmt_e (unoptab
->code
, mode
,
2826 if (unoptab
->code
== NEG
)
2828 /* Try negating floating point values by flipping the sign bit. */
2829 if (SCALAR_FLOAT_MODE_P (mode
))
2831 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2836 /* If there is no negation pattern, and we have no negative zero,
2837 try subtracting from zero. */
2838 if (!HONOR_SIGNED_ZEROS (mode
))
2840 temp
= expand_binop (mode
, (unoptab
== negv_optab
2841 ? subv_optab
: sub_optab
),
2842 CONST0_RTX (mode
), op0
, target
,
2843 unsignedp
, OPTAB_DIRECT
);
2849 /* Try calculating parity (x) as popcount (x) % 2. */
2850 if (unoptab
== parity_optab
)
2852 temp
= expand_parity (mode
, op0
, target
);
2858 /* Now try a library call in this mode. */
2859 if (unoptab
->handlers
[(int) mode
].libfunc
)
2863 enum machine_mode outmode
= mode
;
2865 /* All of these functions return small values. Thus we choose to
2866 have them return something that isn't a double-word. */
2867 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2868 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2870 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2874 /* Pass 1 for NO_QUEUE so we don't lose any increments
2875 if the libcall is cse'd or moved. */
2876 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2877 NULL_RTX
, LCT_CONST
, outmode
,
2879 insns
= get_insns ();
2882 target
= gen_reg_rtx (outmode
);
2883 emit_libcall_block (insns
, target
, value
,
2884 gen_rtx_fmt_e (unoptab
->code
, outmode
, op0
));
2889 /* It can't be done in this mode. Can we do it in a wider mode? */
2891 if (CLASS_HAS_WIDER_MODES_P (class))
2893 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2894 wider_mode
!= VOIDmode
;
2895 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2897 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2898 != CODE_FOR_nothing
)
2899 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2903 /* For certain operations, we need not actually extend
2904 the narrow operand, as long as we will truncate the
2905 results to the same narrowness. */
2907 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2908 (unoptab
== neg_optab
2909 || unoptab
== one_cmpl_optab
)
2910 && class == MODE_INT
);
2912 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2915 /* If we are generating clz using wider mode, adjust the
2917 if (unoptab
== clz_optab
&& temp
!= 0)
2918 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2919 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2920 - GET_MODE_BITSIZE (mode
)),
2921 target
, true, OPTAB_DIRECT
);
2925 if (class != MODE_INT
)
2928 target
= gen_reg_rtx (mode
);
2929 convert_move (target
, temp
, 0);
2933 return gen_lowpart (mode
, temp
);
2936 delete_insns_since (last
);
2941 /* One final attempt at implementing negation via subtraction,
2942 this time allowing widening of the operand. */
2943 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2946 temp
= expand_binop (mode
,
2947 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2948 CONST0_RTX (mode
), op0
,
2949 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2957 /* Emit code to compute the absolute value of OP0, with result to
2958 TARGET if convenient. (TARGET may be 0.) The return value says
2959 where the result actually is to be found.
2961 MODE is the mode of the operand; the mode of the result is
2962 different but can be deduced from MODE.
2967 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2968 int result_unsignedp
)
2973 result_unsignedp
= 1;
2975 /* First try to do it with a special abs instruction. */
2976 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2981 /* For floating point modes, try clearing the sign bit. */
2982 if (SCALAR_FLOAT_MODE_P (mode
))
2984 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2989 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2990 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2991 && !HONOR_SIGNED_ZEROS (mode
))
2993 rtx last
= get_last_insn ();
2995 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2997 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3003 delete_insns_since (last
);
3006 /* If this machine has expensive jumps, we can do integer absolute
3007 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3008 where W is the width of MODE. */
3010 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
3012 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3013 size_int (GET_MODE_BITSIZE (mode
) - 1),
3016 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3019 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3020 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3030 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
3031 int result_unsignedp
, int safe
)
3036 result_unsignedp
= 1;
3038 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3042 /* If that does not win, use conditional jump and negate. */
3044 /* It is safe to use the target if it is the same
3045 as the source if this is also a pseudo register */
3046 if (op0
== target
&& REG_P (op0
)
3047 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3050 op1
= gen_label_rtx ();
3051 if (target
== 0 || ! safe
3052 || GET_MODE (target
) != mode
3053 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3055 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3056 target
= gen_reg_rtx (mode
);
3058 emit_move_insn (target
, op0
);
3061 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3062 NULL_RTX
, NULL_RTX
, op1
);
3064 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3067 emit_move_insn (target
, op0
);
3073 /* A subroutine of expand_copysign, perform the copysign operation using the
3074 abs and neg primitives advertised to exist on the target. The assumption
3075 is that we have a split register file, and leaving op0 in fp registers,
3076 and not playing with subregs so much, will help the register allocator. */
3079 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3080 int bitpos
, bool op0_is_abs
)
3082 enum machine_mode imode
;
3083 HOST_WIDE_INT hi
, lo
;
3092 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3099 if (target
== NULL_RTX
)
3100 target
= copy_to_reg (op0
);
3102 emit_move_insn (target
, op0
);
3105 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3107 imode
= int_mode_for_mode (mode
);
3108 if (imode
== BLKmode
)
3110 op1
= gen_lowpart (imode
, op1
);
3115 if (FLOAT_WORDS_BIG_ENDIAN
)
3116 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3118 word
= bitpos
/ BITS_PER_WORD
;
3119 bitpos
= bitpos
% BITS_PER_WORD
;
3120 op1
= operand_subword_force (op1
, word
, mode
);
3123 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3126 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3130 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3134 op1
= expand_binop (imode
, and_optab
, op1
,
3135 immed_double_const (lo
, hi
, imode
),
3136 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3138 label
= gen_label_rtx ();
3139 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3141 if (GET_CODE (op0
) == CONST_DOUBLE
)
3142 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3144 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3146 emit_move_insn (target
, op0
);
3154 /* A subroutine of expand_copysign, perform the entire copysign operation
3155 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3156 is true if op0 is known to have its sign bit clear. */
3159 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3160 int bitpos
, bool op0_is_abs
)
3162 enum machine_mode imode
;
3163 HOST_WIDE_INT hi
, lo
;
3164 int word
, nwords
, i
;
3167 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3169 imode
= int_mode_for_mode (mode
);
3170 if (imode
== BLKmode
)
3179 if (FLOAT_WORDS_BIG_ENDIAN
)
3180 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3182 word
= bitpos
/ BITS_PER_WORD
;
3183 bitpos
= bitpos
% BITS_PER_WORD
;
3184 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3187 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
3190 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
3194 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
3198 if (target
== 0 || target
== op0
|| target
== op1
)
3199 target
= gen_reg_rtx (mode
);
3205 for (i
= 0; i
< nwords
; ++i
)
3207 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3208 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3213 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
3214 immed_double_const (~lo
, ~hi
, imode
),
3215 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3217 op1
= expand_binop (imode
, and_optab
,
3218 operand_subword_force (op1
, i
, mode
),
3219 immed_double_const (lo
, hi
, imode
),
3220 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3222 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3223 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3224 if (temp
!= targ_piece
)
3225 emit_move_insn (targ_piece
, temp
);
3228 emit_move_insn (targ_piece
, op0_piece
);
3231 insns
= get_insns ();
3234 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
3238 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3239 immed_double_const (lo
, hi
, imode
),
3240 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3242 op0
= gen_lowpart (imode
, op0
);
3244 op0
= expand_binop (imode
, and_optab
, op0
,
3245 immed_double_const (~lo
, ~hi
, imode
),
3246 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3248 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3249 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3250 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3256 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3257 scalar floating point mode. Return NULL if we do not know how to
3258 expand the operation inline. */
3261 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3263 enum machine_mode mode
= GET_MODE (op0
);
3264 const struct real_format
*fmt
;
3268 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3269 gcc_assert (GET_MODE (op1
) == mode
);
3271 /* First try to do it with a special instruction. */
3272 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3273 target
, 0, OPTAB_DIRECT
);
3277 fmt
= REAL_MODE_FORMAT (mode
);
3278 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3282 if (GET_CODE (op0
) == CONST_DOUBLE
)
3284 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3285 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3289 if (fmt
->signbit_ro
>= 0
3290 && (GET_CODE (op0
) == CONST_DOUBLE
3291 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
3292 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)))
3294 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3295 fmt
->signbit_ro
, op0_is_abs
);
3300 if (fmt
->signbit_rw
< 0)
3302 return expand_copysign_bit (mode
, op0
, op1
, target
,
3303 fmt
->signbit_rw
, op0_is_abs
);
3306 /* Generate an instruction whose insn-code is INSN_CODE,
3307 with two operands: an output TARGET and an input OP0.
3308 TARGET *must* be nonzero, and the output is always stored there.
3309 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3310 the value that is stored into TARGET. */
3313 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
3316 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3321 /* Now, if insn does not accept our operands, put them into pseudos. */
3323 if (!insn_data
[icode
].operand
[1].predicate (op0
, mode0
))
3324 op0
= copy_to_mode_reg (mode0
, op0
);
3326 if (!insn_data
[icode
].operand
[0].predicate (temp
, GET_MODE (temp
)))
3327 temp
= gen_reg_rtx (GET_MODE (temp
));
3329 pat
= GEN_FCN (icode
) (temp
, op0
);
3331 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3332 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3337 emit_move_insn (target
, temp
);
3340 struct no_conflict_data
3342 rtx target
, first
, insn
;
3346 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3347 Set P->must_stay if the currently examined clobber / store has to stay
3348 in the list of insns that constitute the actual no_conflict block /
3351 no_conflict_move_test (rtx dest
, rtx set
, void *p0
)
3353 struct no_conflict_data
*p
= p0
;
3355 /* If this inns directly contributes to setting the target, it must stay. */
3356 if (reg_overlap_mentioned_p (p
->target
, dest
))
3357 p
->must_stay
= true;
3358 /* If we haven't committed to keeping any other insns in the list yet,
3359 there is nothing more to check. */
3360 else if (p
->insn
== p
->first
)
3362 /* If this insn sets / clobbers a register that feeds one of the insns
3363 already in the list, this insn has to stay too. */
3364 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3365 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3366 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3367 /* Likewise if this insn depends on a register set by a previous
3368 insn in the list, or if it sets a result (presumably a hard
3369 register) that is set or clobbered by a previous insn.
3370 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3371 SET_DEST perform the former check on the address, and the latter
3372 check on the MEM. */
3373 || (GET_CODE (set
) == SET
3374 && (modified_in_p (SET_SRC (set
), p
->first
)
3375 || modified_in_p (SET_DEST (set
), p
->first
)
3376 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3377 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3378 p
->must_stay
= true;
3381 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3382 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3383 is possible to do so. */
3386 maybe_encapsulate_block (rtx first
, rtx last
, rtx equiv
)
3388 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3390 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3391 encapsulated region would not be in one basic block, i.e. when
3392 there is a control_flow_insn_p insn between FIRST and LAST. */
3393 bool attach_libcall_retval_notes
= true;
3394 rtx insn
, next
= NEXT_INSN (last
);
3396 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3397 if (control_flow_insn_p (insn
))
3399 attach_libcall_retval_notes
= false;
3403 if (attach_libcall_retval_notes
)
3405 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3407 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3413 /* Emit code to perform a series of operations on a multi-word quantity, one
3416 Such a block is preceded by a CLOBBER of the output, consists of multiple
3417 insns, each setting one word of the output, and followed by a SET copying
3418 the output to itself.
3420 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3421 note indicating that it doesn't conflict with the (also multi-word)
3422 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3425 INSNS is a block of code generated to perform the operation, not including
3426 the CLOBBER and final copy. All insns that compute intermediate values
3427 are first emitted, followed by the block as described above.
3429 TARGET, OP0, and OP1 are the output and inputs of the operations,
3430 respectively. OP1 may be zero for a unary operation.
3432 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3435 If TARGET is not a register, INSNS is simply emitted with no special
3436 processing. Likewise if anything in INSNS is not an INSN or if
3437 there is a libcall block inside INSNS.
3439 The final insn emitted is returned. */
3442 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3444 rtx prev
, next
, first
, last
, insn
;
3446 if (!REG_P (target
) || reload_in_progress
)
3447 return emit_insn (insns
);
3449 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3450 if (!NONJUMP_INSN_P (insn
)
3451 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3452 return emit_insn (insns
);
3454 /* First emit all insns that do not store into words of the output and remove
3455 these from the list. */
3456 for (insn
= insns
; insn
; insn
= next
)
3459 struct no_conflict_data data
;
3461 next
= NEXT_INSN (insn
);
3463 /* Some ports (cris) create a libcall regions at their own. We must
3464 avoid any potential nesting of LIBCALLs. */
3465 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3466 remove_note (insn
, note
);
3467 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3468 remove_note (insn
, note
);
3470 data
.target
= target
;
3474 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3475 if (! data
.must_stay
)
3477 if (PREV_INSN (insn
))
3478 NEXT_INSN (PREV_INSN (insn
)) = next
;
3483 PREV_INSN (next
) = PREV_INSN (insn
);
3489 prev
= get_last_insn ();
3491 /* Now write the CLOBBER of the output, followed by the setting of each
3492 of the words, followed by the final copy. */
3493 if (target
!= op0
&& target
!= op1
)
3494 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3496 for (insn
= insns
; insn
; insn
= next
)
3498 next
= NEXT_INSN (insn
);
3501 if (op1
&& REG_P (op1
))
3502 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3505 if (op0
&& REG_P (op0
))
3506 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3510 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3511 != CODE_FOR_nothing
)
3513 last
= emit_move_insn (target
, target
);
3515 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3519 last
= get_last_insn ();
3521 /* Remove any existing REG_EQUAL note from "last", or else it will
3522 be mistaken for a note referring to the full contents of the
3523 alleged libcall value when found together with the REG_RETVAL
3524 note added below. An existing note can come from an insn
3525 expansion at "last". */
3526 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3530 first
= get_insns ();
3532 first
= NEXT_INSN (prev
);
3534 maybe_encapsulate_block (first
, last
, equiv
);
3539 /* Emit code to make a call to a constant function or a library call.
3541 INSNS is a list containing all insns emitted in the call.
3542 These insns leave the result in RESULT. Our block is to copy RESULT
3543 to TARGET, which is logically equivalent to EQUIV.
3545 We first emit any insns that set a pseudo on the assumption that these are
3546 loading constants into registers; doing so allows them to be safely cse'ed
3547 between blocks. Then we emit all the other insns in the block, followed by
3548 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3549 note with an operand of EQUIV.
3551 Moving assignments to pseudos outside of the block is done to improve
3552 the generated code, but is not required to generate correct code,
3553 hence being unable to move an assignment is not grounds for not making
3554 a libcall block. There are two reasons why it is safe to leave these
3555 insns inside the block: First, we know that these pseudos cannot be
3556 used in generated RTL outside the block since they are created for
3557 temporary purposes within the block. Second, CSE will not record the
3558 values of anything set inside a libcall block, so we know they must
3559 be dead at the end of the block.
3561 Except for the first group of insns (the ones setting pseudos), the
3562 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3565 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3567 rtx final_dest
= target
;
3568 rtx prev
, next
, first
, last
, insn
;
3570 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3571 into a MEM later. Protect the libcall block from this change. */
3572 if (! REG_P (target
) || REG_USERVAR_P (target
))
3573 target
= gen_reg_rtx (GET_MODE (target
));
3575 /* If we're using non-call exceptions, a libcall corresponding to an
3576 operation that may trap may also trap. */
3577 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3579 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3582 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3584 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3585 remove_note (insn
, note
);
3589 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3590 reg note to indicate that this call cannot throw or execute a nonlocal
3591 goto (unless there is already a REG_EH_REGION note, in which case
3593 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3596 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3599 XEXP (note
, 0) = constm1_rtx
;
3601 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3605 /* First emit all insns that set pseudos. Remove them from the list as
3606 we go. Avoid insns that set pseudos which were referenced in previous
3607 insns. These can be generated by move_by_pieces, for example,
3608 to update an address. Similarly, avoid insns that reference things
3609 set in previous insns. */
3611 for (insn
= insns
; insn
; insn
= next
)
3613 rtx set
= single_set (insn
);
3616 /* Some ports (cris) create a libcall regions at their own. We must
3617 avoid any potential nesting of LIBCALLs. */
3618 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3619 remove_note (insn
, note
);
3620 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3621 remove_note (insn
, note
);
3623 next
= NEXT_INSN (insn
);
3625 if (set
!= 0 && REG_P (SET_DEST (set
))
3626 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3628 struct no_conflict_data data
;
3630 data
.target
= const0_rtx
;
3634 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3635 if (! data
.must_stay
)
3637 if (PREV_INSN (insn
))
3638 NEXT_INSN (PREV_INSN (insn
)) = next
;
3643 PREV_INSN (next
) = PREV_INSN (insn
);
3649 /* Some ports use a loop to copy large arguments onto the stack.
3650 Don't move anything outside such a loop. */
3655 prev
= get_last_insn ();
3657 /* Write the remaining insns followed by the final copy. */
3659 for (insn
= insns
; insn
; insn
= next
)
3661 next
= NEXT_INSN (insn
);
3666 last
= emit_move_insn (target
, result
);
3667 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3668 != CODE_FOR_nothing
)
3669 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3672 /* Remove any existing REG_EQUAL note from "last", or else it will
3673 be mistaken for a note referring to the full contents of the
3674 libcall value when found together with the REG_RETVAL note added
3675 below. An existing note can come from an insn expansion at
3677 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3680 if (final_dest
!= target
)
3681 emit_move_insn (final_dest
, target
);
3684 first
= get_insns ();
3686 first
= NEXT_INSN (prev
);
3688 maybe_encapsulate_block (first
, last
, equiv
);
3691 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3692 PURPOSE describes how this comparison will be used. CODE is the rtx
3693 comparison code we will be using.
3695 ??? Actually, CODE is slightly weaker than that. A target is still
3696 required to implement all of the normal bcc operations, but not
3697 required to implement all (or any) of the unordered bcc operations. */
3700 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3701 enum can_compare_purpose purpose
)
3705 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3707 if (purpose
== ccp_jump
)
3708 return bcc_gen_fctn
[(int) code
] != NULL
;
3709 else if (purpose
== ccp_store_flag
)
3710 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3712 /* There's only one cmov entry point, and it's allowed to fail. */
3715 if (purpose
== ccp_jump
3716 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3718 if (purpose
== ccp_cmov
3719 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3721 if (purpose
== ccp_store_flag
3722 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3724 mode
= GET_MODE_WIDER_MODE (mode
);
3726 while (mode
!= VOIDmode
);
3731 /* This function is called when we are going to emit a compare instruction that
3732 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3734 *PMODE is the mode of the inputs (in case they are const_int).
3735 *PUNSIGNEDP nonzero says that the operands are unsigned;
3736 this matters if they need to be widened.
3738 If they have mode BLKmode, then SIZE specifies the size of both operands.
3740 This function performs all the setup necessary so that the caller only has
3741 to emit a single comparison insn. This setup can involve doing a BLKmode
3742 comparison or emitting a library call to perform the comparison if no insn
3743 is available to handle it.
3744 The values which are passed in through pointers can be modified; the caller
3745 should perform the comparison on the modified values. Constant
3746 comparisons must have already been folded. */
3749 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3750 enum machine_mode
*pmode
, int *punsignedp
,
3751 enum can_compare_purpose purpose
)
3753 enum machine_mode mode
= *pmode
;
3754 rtx x
= *px
, y
= *py
;
3755 int unsignedp
= *punsignedp
;
3757 /* If we are inside an appropriately-short loop and we are optimizing,
3758 force expensive constants into a register. */
3759 if (CONSTANT_P (x
) && optimize
3760 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3761 x
= force_reg (mode
, x
);
3763 if (CONSTANT_P (y
) && optimize
3764 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3765 y
= force_reg (mode
, y
);
3768 /* Make sure if we have a canonical comparison. The RTL
3769 documentation states that canonical comparisons are required only
3770 for targets which have cc0. */
3771 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3774 /* Don't let both operands fail to indicate the mode. */
3775 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3776 x
= force_reg (mode
, x
);
3778 /* Handle all BLKmode compares. */
3780 if (mode
== BLKmode
)
3782 enum machine_mode cmp_mode
, result_mode
;
3783 enum insn_code cmp_code
;
3788 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3792 /* Try to use a memory block compare insn - either cmpstr
3793 or cmpmem will do. */
3794 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3795 cmp_mode
!= VOIDmode
;
3796 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3798 cmp_code
= cmpmem_optab
[cmp_mode
];
3799 if (cmp_code
== CODE_FOR_nothing
)
3800 cmp_code
= cmpstr_optab
[cmp_mode
];
3801 if (cmp_code
== CODE_FOR_nothing
)
3802 cmp_code
= cmpstrn_optab
[cmp_mode
];
3803 if (cmp_code
== CODE_FOR_nothing
)
3806 /* Must make sure the size fits the insn's mode. */
3807 if ((GET_CODE (size
) == CONST_INT
3808 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3809 || (GET_MODE_BITSIZE (GET_MODE (size
))
3810 > GET_MODE_BITSIZE (cmp_mode
)))
3813 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3814 result
= gen_reg_rtx (result_mode
);
3815 size
= convert_to_mode (cmp_mode
, size
, 1);
3816 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3820 *pmode
= result_mode
;
3824 /* Otherwise call a library function, memcmp. */
3825 libfunc
= memcmp_libfunc
;
3826 length_type
= sizetype
;
3827 result_mode
= TYPE_MODE (integer_type_node
);
3828 cmp_mode
= TYPE_MODE (length_type
);
3829 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3830 TYPE_UNSIGNED (length_type
));
3832 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3839 *pmode
= result_mode
;
3843 /* Don't allow operands to the compare to trap, as that can put the
3844 compare and branch in different basic blocks. */
3845 if (flag_non_call_exceptions
)
3848 x
= force_reg (mode
, x
);
3850 y
= force_reg (mode
, y
);
3855 if (can_compare_p (*pcomparison
, mode
, purpose
))
3858 /* Handle a lib call just for the mode we are using. */
3860 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& !SCALAR_FLOAT_MODE_P (mode
))
3862 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3865 /* If we want unsigned, and this mode has a distinct unsigned
3866 comparison routine, use that. */
3867 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3868 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3870 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3871 word_mode
, 2, x
, mode
, y
, mode
);
3873 /* There are two kinds of comparison routines. Biased routines
3874 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3875 of gcc expect that the comparison operation is equivalent
3876 to the modified comparison. For signed comparisons compare the
3877 result against 1 in the biased case, and zero in the unbiased
3878 case. For unsigned comparisons always compare against 1 after
3879 biasing the unbiased result by adding 1. This gives us a way to
3885 if (!TARGET_LIB_INT_CMP_BIASED
)
3888 *px
= plus_constant (result
, 1);
3895 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3896 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3899 /* Before emitting an insn with code ICODE, make sure that X, which is going
3900 to be used for operand OPNUM of the insn, is converted from mode MODE to
3901 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3902 that it is accepted by the operand predicate. Return the new value. */
3905 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3906 enum machine_mode wider_mode
, int unsignedp
)
3908 if (mode
!= wider_mode
)
3909 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3911 if (!insn_data
[icode
].operand
[opnum
].predicate
3912 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3916 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3922 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3923 we can do the comparison.
3924 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3925 be NULL_RTX which indicates that only a comparison is to be generated. */
3928 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3929 enum rtx_code comparison
, int unsignedp
, rtx label
)
3931 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3932 enum mode_class
class = GET_MODE_CLASS (mode
);
3933 enum machine_mode wider_mode
= mode
;
3935 /* Try combined insns first. */
3938 enum insn_code icode
;
3939 PUT_MODE (test
, wider_mode
);
3943 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3945 if (icode
!= CODE_FOR_nothing
3946 && insn_data
[icode
].operand
[0].predicate (test
, wider_mode
))
3948 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3949 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3950 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3955 /* Handle some compares against zero. */
3956 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3957 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3959 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3960 emit_insn (GEN_FCN (icode
) (x
));
3962 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3966 /* Handle compares for which there is a directly suitable insn. */
3968 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3969 if (icode
!= CODE_FOR_nothing
)
3971 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3972 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3973 emit_insn (GEN_FCN (icode
) (x
, y
));
3975 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3979 if (!CLASS_HAS_WIDER_MODES_P (class))
3982 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3984 while (wider_mode
!= VOIDmode
);
3989 /* Generate code to compare X with Y so that the condition codes are
3990 set and to jump to LABEL if the condition is true. If X is a
3991 constant and Y is not a constant, then the comparison is swapped to
3992 ensure that the comparison RTL has the canonical form.
3994 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3995 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3996 the proper branch condition code.
3998 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4000 MODE is the mode of the inputs (in case they are const_int).
4002 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4003 be passed unchanged to emit_cmp_insn, then potentially converted into an
4004 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4007 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4008 enum machine_mode mode
, int unsignedp
, rtx label
)
4010 rtx op0
= x
, op1
= y
;
4012 /* Swap operands and condition to ensure canonical RTL. */
4013 if (swap_commutative_operands_p (x
, y
))
4015 /* If we're not emitting a branch, this means some caller
4020 comparison
= swap_condition (comparison
);
4024 /* If OP0 is still a constant, then both X and Y must be constants.
4025 Force X into a register to create canonical RTL. */
4026 if (CONSTANT_P (op0
))
4027 op0
= force_reg (mode
, op0
);
4031 comparison
= unsigned_condition (comparison
);
4033 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
4035 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
4038 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4041 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4042 enum machine_mode mode
, int unsignedp
)
4044 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
4047 /* Emit a library call comparison between floating point X and Y.
4048 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4051 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
4052 enum machine_mode
*pmode
, int *punsignedp
)
4054 enum rtx_code comparison
= *pcomparison
;
4055 enum rtx_code swapped
= swap_condition (comparison
);
4056 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4059 enum machine_mode orig_mode
= GET_MODE (x
);
4060 enum machine_mode mode
;
4061 rtx value
, target
, insns
, equiv
;
4063 bool reversed_p
= false;
4065 for (mode
= orig_mode
;
4067 mode
= GET_MODE_WIDER_MODE (mode
))
4069 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
4072 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
4075 tmp
= x
; x
= y
; y
= tmp
;
4076 comparison
= swapped
;
4080 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
4081 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
4083 comparison
= reversed
;
4089 gcc_assert (mode
!= VOIDmode
);
4091 if (mode
!= orig_mode
)
4093 x
= convert_to_mode (mode
, x
, 0);
4094 y
= convert_to_mode (mode
, y
, 0);
4097 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4098 the RTL. The allows the RTL optimizers to delete the libcall if the
4099 condition can be determined at compile-time. */
4100 if (comparison
== UNORDERED
)
4102 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
4103 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
4104 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
4105 temp
, const_true_rtx
, equiv
);
4109 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
4110 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4112 rtx true_rtx
, false_rtx
;
4117 true_rtx
= const0_rtx
;
4118 false_rtx
= const_true_rtx
;
4122 true_rtx
= const_true_rtx
;
4123 false_rtx
= const0_rtx
;
4127 true_rtx
= const1_rtx
;
4128 false_rtx
= const0_rtx
;
4132 true_rtx
= const0_rtx
;
4133 false_rtx
= constm1_rtx
;
4137 true_rtx
= constm1_rtx
;
4138 false_rtx
= const0_rtx
;
4142 true_rtx
= const0_rtx
;
4143 false_rtx
= const1_rtx
;
4149 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
4150 equiv
, true_rtx
, false_rtx
);
4155 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4156 word_mode
, 2, x
, mode
, y
, mode
);
4157 insns
= get_insns ();
4160 target
= gen_reg_rtx (word_mode
);
4161 emit_libcall_block (insns
, target
, value
, equiv
);
4163 if (comparison
== UNORDERED
4164 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4165 comparison
= reversed_p
? EQ
: NE
;
4170 *pcomparison
= comparison
;
4174 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4177 emit_indirect_jump (rtx loc
)
4179 if (!insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
4181 loc
= copy_to_mode_reg (Pmode
, loc
);
4183 emit_jump_insn (gen_indirect_jump (loc
));
4187 #ifdef HAVE_conditional_move
4189 /* Emit a conditional move instruction if the machine supports one for that
4190 condition and machine mode.
4192 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4193 the mode to use should they be constants. If it is VOIDmode, they cannot
4196 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4197 should be stored there. MODE is the mode to use should they be constants.
4198 If it is VOIDmode, they cannot both be constants.
4200 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4201 is not supported. */
4204 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4205 enum machine_mode cmode
, rtx op2
, rtx op3
,
4206 enum machine_mode mode
, int unsignedp
)
4208 rtx tem
, subtarget
, comparison
, insn
;
4209 enum insn_code icode
;
4210 enum rtx_code reversed
;
4212 /* If one operand is constant, make it the second one. Only do this
4213 if the other operand is not constant as well. */
4215 if (swap_commutative_operands_p (op0
, op1
))
4220 code
= swap_condition (code
);
4223 /* get_condition will prefer to generate LT and GT even if the old
4224 comparison was against zero, so undo that canonicalization here since
4225 comparisons against zero are cheaper. */
4226 if (code
== LT
&& op1
== const1_rtx
)
4227 code
= LE
, op1
= const0_rtx
;
4228 else if (code
== GT
&& op1
== constm1_rtx
)
4229 code
= GE
, op1
= const0_rtx
;
4231 if (cmode
== VOIDmode
)
4232 cmode
= GET_MODE (op0
);
4234 if (swap_commutative_operands_p (op2
, op3
)
4235 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4244 if (mode
== VOIDmode
)
4245 mode
= GET_MODE (op2
);
4247 icode
= movcc_gen_code
[mode
];
4249 if (icode
== CODE_FOR_nothing
)
4253 target
= gen_reg_rtx (mode
);
4257 /* If the insn doesn't accept these operands, put them in pseudos. */
4259 if (!insn_data
[icode
].operand
[0].predicate
4260 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4261 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4263 if (!insn_data
[icode
].operand
[2].predicate
4264 (op2
, insn_data
[icode
].operand
[2].mode
))
4265 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4267 if (!insn_data
[icode
].operand
[3].predicate
4268 (op3
, insn_data
[icode
].operand
[3].mode
))
4269 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4271 /* Everything should now be in the suitable form, so emit the compare insn
4272 and then the conditional move. */
4275 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4277 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4278 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4279 return NULL and let the caller figure out how best to deal with this
4281 if (GET_CODE (comparison
) != code
)
4284 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4286 /* If that failed, then give up. */
4292 if (subtarget
!= target
)
4293 convert_move (target
, subtarget
, 0);
4298 /* Return nonzero if a conditional move of mode MODE is supported.
4300 This function is for combine so it can tell whether an insn that looks
4301 like a conditional move is actually supported by the hardware. If we
4302 guess wrong we lose a bit on optimization, but that's it. */
4303 /* ??? sparc64 supports conditionally moving integers values based on fp
4304 comparisons, and vice versa. How do we handle them? */
4307 can_conditionally_move_p (enum machine_mode mode
)
4309 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4315 #endif /* HAVE_conditional_move */
4317 /* Emit a conditional addition instruction if the machine supports one for that
4318 condition and machine mode.
4320 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4321 the mode to use should they be constants. If it is VOIDmode, they cannot
4324 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4325 should be stored there. MODE is the mode to use should they be constants.
4326 If it is VOIDmode, they cannot both be constants.
4328 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4329 is not supported. */
4332 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4333 enum machine_mode cmode
, rtx op2
, rtx op3
,
4334 enum machine_mode mode
, int unsignedp
)
4336 rtx tem
, subtarget
, comparison
, insn
;
4337 enum insn_code icode
;
4338 enum rtx_code reversed
;
4340 /* If one operand is constant, make it the second one. Only do this
4341 if the other operand is not constant as well. */
4343 if (swap_commutative_operands_p (op0
, op1
))
4348 code
= swap_condition (code
);
4351 /* get_condition will prefer to generate LT and GT even if the old
4352 comparison was against zero, so undo that canonicalization here since
4353 comparisons against zero are cheaper. */
4354 if (code
== LT
&& op1
== const1_rtx
)
4355 code
= LE
, op1
= const0_rtx
;
4356 else if (code
== GT
&& op1
== constm1_rtx
)
4357 code
= GE
, op1
= const0_rtx
;
4359 if (cmode
== VOIDmode
)
4360 cmode
= GET_MODE (op0
);
4362 if (swap_commutative_operands_p (op2
, op3
)
4363 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4372 if (mode
== VOIDmode
)
4373 mode
= GET_MODE (op2
);
4375 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4377 if (icode
== CODE_FOR_nothing
)
4381 target
= gen_reg_rtx (mode
);
4383 /* If the insn doesn't accept these operands, put them in pseudos. */
4385 if (!insn_data
[icode
].operand
[0].predicate
4386 (target
, insn_data
[icode
].operand
[0].mode
))
4387 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4391 if (!insn_data
[icode
].operand
[2].predicate
4392 (op2
, insn_data
[icode
].operand
[2].mode
))
4393 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4395 if (!insn_data
[icode
].operand
[3].predicate
4396 (op3
, insn_data
[icode
].operand
[3].mode
))
4397 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4399 /* Everything should now be in the suitable form, so emit the compare insn
4400 and then the conditional move. */
4403 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4405 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4406 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4407 return NULL and let the caller figure out how best to deal with this
4409 if (GET_CODE (comparison
) != code
)
4412 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4414 /* If that failed, then give up. */
4420 if (subtarget
!= target
)
4421 convert_move (target
, subtarget
, 0);
4426 /* These functions attempt to generate an insn body, rather than
4427 emitting the insn, but if the gen function already emits them, we
4428 make no attempt to turn them back into naked patterns. */
4430 /* Generate and return an insn body to add Y to X. */
4433 gen_add2_insn (rtx x
, rtx y
)
4435 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4437 gcc_assert (insn_data
[icode
].operand
[0].predicate
4438 (x
, insn_data
[icode
].operand
[0].mode
));
4439 gcc_assert (insn_data
[icode
].operand
[1].predicate
4440 (x
, insn_data
[icode
].operand
[1].mode
));
4441 gcc_assert (insn_data
[icode
].operand
[2].predicate
4442 (y
, insn_data
[icode
].operand
[2].mode
));
4444 return GEN_FCN (icode
) (x
, x
, y
);
4447 /* Generate and return an insn body to add r1 and c,
4448 storing the result in r0. */
4450 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4452 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4454 if (icode
== CODE_FOR_nothing
4455 || !(insn_data
[icode
].operand
[0].predicate
4456 (r0
, insn_data
[icode
].operand
[0].mode
))
4457 || !(insn_data
[icode
].operand
[1].predicate
4458 (r1
, insn_data
[icode
].operand
[1].mode
))
4459 || !(insn_data
[icode
].operand
[2].predicate
4460 (c
, insn_data
[icode
].operand
[2].mode
)))
4463 return GEN_FCN (icode
) (r0
, r1
, c
);
4467 have_add2_insn (rtx x
, rtx y
)
4471 gcc_assert (GET_MODE (x
) != VOIDmode
);
4473 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4475 if (icode
== CODE_FOR_nothing
)
4478 if (!(insn_data
[icode
].operand
[0].predicate
4479 (x
, insn_data
[icode
].operand
[0].mode
))
4480 || !(insn_data
[icode
].operand
[1].predicate
4481 (x
, insn_data
[icode
].operand
[1].mode
))
4482 || !(insn_data
[icode
].operand
[2].predicate
4483 (y
, insn_data
[icode
].operand
[2].mode
)))
4489 /* Generate and return an insn body to subtract Y from X. */
4492 gen_sub2_insn (rtx x
, rtx y
)
4494 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4496 gcc_assert (insn_data
[icode
].operand
[0].predicate
4497 (x
, insn_data
[icode
].operand
[0].mode
));
4498 gcc_assert (insn_data
[icode
].operand
[1].predicate
4499 (x
, insn_data
[icode
].operand
[1].mode
));
4500 gcc_assert (insn_data
[icode
].operand
[2].predicate
4501 (y
, insn_data
[icode
].operand
[2].mode
));
4503 return GEN_FCN (icode
) (x
, x
, y
);
4506 /* Generate and return an insn body to subtract r1 and c,
4507 storing the result in r0. */
4509 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4511 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4513 if (icode
== CODE_FOR_nothing
4514 || !(insn_data
[icode
].operand
[0].predicate
4515 (r0
, insn_data
[icode
].operand
[0].mode
))
4516 || !(insn_data
[icode
].operand
[1].predicate
4517 (r1
, insn_data
[icode
].operand
[1].mode
))
4518 || !(insn_data
[icode
].operand
[2].predicate
4519 (c
, insn_data
[icode
].operand
[2].mode
)))
4522 return GEN_FCN (icode
) (r0
, r1
, c
);
4526 have_sub2_insn (rtx x
, rtx y
)
4530 gcc_assert (GET_MODE (x
) != VOIDmode
);
4532 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4534 if (icode
== CODE_FOR_nothing
)
4537 if (!(insn_data
[icode
].operand
[0].predicate
4538 (x
, insn_data
[icode
].operand
[0].mode
))
4539 || !(insn_data
[icode
].operand
[1].predicate
4540 (x
, insn_data
[icode
].operand
[1].mode
))
4541 || !(insn_data
[icode
].operand
[2].predicate
4542 (y
, insn_data
[icode
].operand
[2].mode
)))
4548 /* Generate the body of an instruction to copy Y into X.
4549 It may be a list of insns, if one insn isn't enough. */
4552 gen_move_insn (rtx x
, rtx y
)
4557 emit_move_insn_1 (x
, y
);
4563 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4564 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4565 no such operation exists, CODE_FOR_nothing will be returned. */
4568 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4572 #ifdef HAVE_ptr_extend
4574 return CODE_FOR_ptr_extend
;
4577 tab
= unsignedp
? zext_optab
: sext_optab
;
4578 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4581 /* Generate the body of an insn to extend Y (with mode MFROM)
4582 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4585 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4586 enum machine_mode mfrom
, int unsignedp
)
4588 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4589 return GEN_FCN (icode
) (x
, y
);
4592 /* can_fix_p and can_float_p say whether the target machine
4593 can directly convert a given fixed point type to
4594 a given floating point type, or vice versa.
4595 The returned value is the CODE_FOR_... value to use,
4596 or CODE_FOR_nothing if these modes cannot be directly converted.
4598 *TRUNCP_PTR is set to 1 if it is necessary to output
4599 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4601 static enum insn_code
4602 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4603 int unsignedp
, int *truncp_ptr
)
4606 enum insn_code icode
;
4608 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4609 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4610 if (icode
!= CODE_FOR_nothing
)
4616 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4617 for this to work. We need to rework the fix* and ftrunc* patterns
4618 and documentation. */
4619 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4620 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4621 if (icode
!= CODE_FOR_nothing
4622 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4629 return CODE_FOR_nothing
;
4632 static enum insn_code
4633 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4638 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4639 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4642 /* Generate code to convert FROM to floating point
4643 and store in TO. FROM must be fixed point and not VOIDmode.
4644 UNSIGNEDP nonzero means regard FROM as unsigned.
4645 Normally this is done by correcting the final value
4646 if it is negative. */
4649 expand_float (rtx to
, rtx from
, int unsignedp
)
4651 enum insn_code icode
;
4653 enum machine_mode fmode
, imode
;
4654 bool can_do_signed
= false;
4656 /* Crash now, because we won't be able to decide which mode to use. */
4657 gcc_assert (GET_MODE (from
) != VOIDmode
);
4659 /* Look for an insn to do the conversion. Do it in the specified
4660 modes if possible; otherwise convert either input, output or both to
4661 wider mode. If the integer mode is wider than the mode of FROM,
4662 we can do the conversion signed even if the input is unsigned. */
4664 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4665 fmode
= GET_MODE_WIDER_MODE (fmode
))
4666 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4667 imode
= GET_MODE_WIDER_MODE (imode
))
4669 int doing_unsigned
= unsignedp
;
4671 if (fmode
!= GET_MODE (to
)
4672 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4675 icode
= can_float_p (fmode
, imode
, unsignedp
);
4676 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4678 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4679 if (scode
!= CODE_FOR_nothing
)
4680 can_do_signed
= true;
4681 if (imode
!= GET_MODE (from
))
4682 icode
= scode
, doing_unsigned
= 0;
4685 if (icode
!= CODE_FOR_nothing
)
4687 if (imode
!= GET_MODE (from
))
4688 from
= convert_to_mode (imode
, from
, unsignedp
);
4690 if (fmode
!= GET_MODE (to
))
4691 target
= gen_reg_rtx (fmode
);
4693 emit_unop_insn (icode
, target
, from
,
4694 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4697 convert_move (to
, target
, 0);
4702 /* Unsigned integer, and no way to convert directly. For binary
4703 floating point modes, convert as signed, then conditionally adjust
4705 if (unsignedp
&& can_do_signed
&& !DECIMAL_FLOAT_MODE_P (GET_MODE (to
)))
4707 rtx label
= gen_label_rtx ();
4709 REAL_VALUE_TYPE offset
;
4711 /* Look for a usable floating mode FMODE wider than the source and at
4712 least as wide as the target. Using FMODE will avoid rounding woes
4713 with unsigned values greater than the signed maximum value. */
4715 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4716 fmode
= GET_MODE_WIDER_MODE (fmode
))
4717 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4718 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4721 if (fmode
== VOIDmode
)
4723 /* There is no such mode. Pretend the target is wide enough. */
4724 fmode
= GET_MODE (to
);
4726 /* Avoid double-rounding when TO is narrower than FROM. */
4727 if ((significand_size (fmode
) + 1)
4728 < GET_MODE_BITSIZE (GET_MODE (from
)))
4731 rtx neglabel
= gen_label_rtx ();
4733 /* Don't use TARGET if it isn't a register, is a hard register,
4734 or is the wrong mode. */
4736 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4737 || GET_MODE (target
) != fmode
)
4738 target
= gen_reg_rtx (fmode
);
4740 imode
= GET_MODE (from
);
4741 do_pending_stack_adjust ();
4743 /* Test whether the sign bit is set. */
4744 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4747 /* The sign bit is not set. Convert as signed. */
4748 expand_float (target
, from
, 0);
4749 emit_jump_insn (gen_jump (label
));
4752 /* The sign bit is set.
4753 Convert to a usable (positive signed) value by shifting right
4754 one bit, while remembering if a nonzero bit was shifted
4755 out; i.e., compute (from & 1) | (from >> 1). */
4757 emit_label (neglabel
);
4758 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4759 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4760 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4762 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4764 expand_float (target
, temp
, 0);
4766 /* Multiply by 2 to undo the shift above. */
4767 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4768 target
, 0, OPTAB_LIB_WIDEN
);
4770 emit_move_insn (target
, temp
);
4772 do_pending_stack_adjust ();
4778 /* If we are about to do some arithmetic to correct for an
4779 unsigned operand, do it in a pseudo-register. */
4781 if (GET_MODE (to
) != fmode
4782 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4783 target
= gen_reg_rtx (fmode
);
4785 /* Convert as signed integer to floating. */
4786 expand_float (target
, from
, 0);
4788 /* If FROM is negative (and therefore TO is negative),
4789 correct its value by 2**bitwidth. */
4791 do_pending_stack_adjust ();
4792 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4796 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4797 temp
= expand_binop (fmode
, add_optab
, target
,
4798 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4799 target
, 0, OPTAB_LIB_WIDEN
);
4801 emit_move_insn (target
, temp
);
4803 do_pending_stack_adjust ();
4808 /* No hardware instruction available; call a library routine. */
4813 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4815 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4816 from
= convert_to_mode (SImode
, from
, unsignedp
);
4818 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4819 gcc_assert (libfunc
);
4823 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4824 GET_MODE (to
), 1, from
,
4826 insns
= get_insns ();
4829 emit_libcall_block (insns
, target
, value
,
4830 gen_rtx_FLOAT (GET_MODE (to
), from
));
4835 /* Copy result to requested destination
4836 if we have been computing in a temp location. */
4840 if (GET_MODE (target
) == GET_MODE (to
))
4841 emit_move_insn (to
, target
);
4843 convert_move (to
, target
, 0);
4847 /* Generate code to convert FROM to fixed point and store in TO. FROM
4848 must be floating point. */
4851 expand_fix (rtx to
, rtx from
, int unsignedp
)
4853 enum insn_code icode
;
4855 enum machine_mode fmode
, imode
;
4858 /* We first try to find a pair of modes, one real and one integer, at
4859 least as wide as FROM and TO, respectively, in which we can open-code
4860 this conversion. If the integer mode is wider than the mode of TO,
4861 we can do the conversion either signed or unsigned. */
4863 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4864 fmode
= GET_MODE_WIDER_MODE (fmode
))
4865 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4866 imode
= GET_MODE_WIDER_MODE (imode
))
4868 int doing_unsigned
= unsignedp
;
4870 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4871 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4872 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4874 if (icode
!= CODE_FOR_nothing
)
4876 if (fmode
!= GET_MODE (from
))
4877 from
= convert_to_mode (fmode
, from
, 0);
4881 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4882 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4886 if (imode
!= GET_MODE (to
))
4887 target
= gen_reg_rtx (imode
);
4889 emit_unop_insn (icode
, target
, from
,
4890 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4892 convert_move (to
, target
, unsignedp
);
4897 /* For an unsigned conversion, there is one more way to do it.
4898 If we have a signed conversion, we generate code that compares
4899 the real value to the largest representable positive number. If if
4900 is smaller, the conversion is done normally. Otherwise, subtract
4901 one plus the highest signed number, convert, and add it back.
4903 We only need to check all real modes, since we know we didn't find
4904 anything with a wider integer mode.
4906 This code used to extend FP value into mode wider than the destination.
4907 This is not needed. Consider, for instance conversion from SFmode
4910 The hot path through the code is dealing with inputs smaller than 2^63
4911 and doing just the conversion, so there is no bits to lose.
4913 In the other path we know the value is positive in the range 2^63..2^64-1
4914 inclusive. (as for other imput overflow happens and result is undefined)
4915 So we know that the most important bit set in mantissa corresponds to
4916 2^63. The subtraction of 2^63 should not generate any rounding as it
4917 simply clears out that bit. The rest is trivial. */
4919 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4920 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4921 fmode
= GET_MODE_WIDER_MODE (fmode
))
4922 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4926 REAL_VALUE_TYPE offset
;
4927 rtx limit
, lab1
, lab2
, insn
;
4929 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4930 real_2expN (&offset
, bitsize
- 1);
4931 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4932 lab1
= gen_label_rtx ();
4933 lab2
= gen_label_rtx ();
4935 if (fmode
!= GET_MODE (from
))
4936 from
= convert_to_mode (fmode
, from
, 0);
4938 /* See if we need to do the subtraction. */
4939 do_pending_stack_adjust ();
4940 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4943 /* If not, do the signed "fix" and branch around fixup code. */
4944 expand_fix (to
, from
, 0);
4945 emit_jump_insn (gen_jump (lab2
));
4948 /* Otherwise, subtract 2**(N-1), convert to signed number,
4949 then add 2**(N-1). Do the addition using XOR since this
4950 will often generate better code. */
4952 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4953 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4954 expand_fix (to
, target
, 0);
4955 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4957 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4959 to
, 1, OPTAB_LIB_WIDEN
);
4962 emit_move_insn (to
, target
);
4966 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4967 != CODE_FOR_nothing
)
4969 /* Make a place for a REG_NOTE and add it. */
4970 insn
= emit_move_insn (to
, to
);
4971 set_unique_reg_note (insn
,
4973 gen_rtx_fmt_e (UNSIGNED_FIX
,
4981 /* We can't do it with an insn, so use a library call. But first ensure
4982 that the mode of TO is at least as wide as SImode, since those are the
4983 only library calls we know about. */
4985 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4987 target
= gen_reg_rtx (SImode
);
4989 expand_fix (target
, from
, unsignedp
);
4997 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4998 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4999 gcc_assert (libfunc
);
5003 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5004 GET_MODE (to
), 1, from
,
5006 insns
= get_insns ();
5009 emit_libcall_block (insns
, target
, value
,
5010 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5011 GET_MODE (to
), from
));
5016 if (GET_MODE (to
) == GET_MODE (target
))
5017 emit_move_insn (to
, target
);
5019 convert_move (to
, target
, 0);
5023 /* Generate code to convert FROM to fixed point and store in TO. FROM
5024 must be floating point, TO must be signed. Use the conversion optab
5025 TAB to do the conversion. */
5028 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5030 enum insn_code icode
;
5032 enum machine_mode fmode
, imode
;
5034 /* We first try to find a pair of modes, one real and one integer, at
5035 least as wide as FROM and TO, respectively, in which we can open-code
5036 this conversion. If the integer mode is wider than the mode of TO,
5037 we can do the conversion either signed or unsigned. */
5039 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5040 fmode
= GET_MODE_WIDER_MODE (fmode
))
5041 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5042 imode
= GET_MODE_WIDER_MODE (imode
))
5044 icode
= tab
->handlers
[imode
][fmode
].insn_code
;
5045 if (icode
!= CODE_FOR_nothing
)
5047 if (fmode
!= GET_MODE (from
))
5048 from
= convert_to_mode (fmode
, from
, 0);
5050 if (imode
!= GET_MODE (to
))
5051 target
= gen_reg_rtx (imode
);
5053 emit_unop_insn (icode
, target
, from
, UNKNOWN
);
5055 convert_move (to
, target
, 0);
5063 /* Report whether we have an instruction to perform the operation
5064 specified by CODE on operands of mode MODE. */
5066 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
5068 return (code_to_optab
[(int) code
] != 0
5069 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
5070 != CODE_FOR_nothing
));
5073 /* Create a blank optab. */
5078 optab op
= ggc_alloc (sizeof (struct optab
));
5079 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5081 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
5082 op
->handlers
[i
].libfunc
= 0;
5088 static convert_optab
5089 new_convert_optab (void)
5092 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
5093 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5094 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
5096 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
5097 op
->handlers
[i
][j
].libfunc
= 0;
5102 /* Same, but fill in its code as CODE, and write it into the
5103 code_to_optab table. */
5105 init_optab (enum rtx_code code
)
5107 optab op
= new_optab ();
5109 code_to_optab
[(int) code
] = op
;
5113 /* Same, but fill in its code as CODE, and do _not_ write it into
5114 the code_to_optab table. */
5116 init_optabv (enum rtx_code code
)
5118 optab op
= new_optab ();
5123 /* Conversion optabs never go in the code_to_optab table. */
5124 static inline convert_optab
5125 init_convert_optab (enum rtx_code code
)
5127 convert_optab op
= new_convert_optab ();
5132 /* Initialize the libfunc fields of an entire group of entries in some
5133 optab. Each entry is set equal to a string consisting of a leading
5134 pair of underscores followed by a generic operation name followed by
5135 a mode name (downshifted to lowercase) followed by a single character
5136 representing the number of operands for the given operation (which is
5137 usually one of the characters '2', '3', or '4').
5139 OPTABLE is the table in which libfunc fields are to be initialized.
5140 FIRST_MODE is the first machine mode index in the given optab to
5142 LAST_MODE is the last machine mode index in the given optab to
5144 OPNAME is the generic (string) name of the operation.
5145 SUFFIX is the character which specifies the number of operands for
5146 the given generic operation.
5150 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
5151 const char *opname
, int suffix
)
5154 unsigned opname_len
= strlen (opname
);
5156 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
5157 mode
= (enum machine_mode
) ((int) mode
+ 1))
5159 const char *mname
= GET_MODE_NAME (mode
);
5160 unsigned mname_len
= strlen (mname
);
5161 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5168 for (q
= opname
; *q
; )
5170 for (q
= mname
; *q
; q
++)
5171 *p
++ = TOLOWER (*q
);
5175 optable
->handlers
[(int) mode
].libfunc
5176 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5180 /* Initialize the libfunc fields of an entire group of entries in some
5181 optab which correspond to all integer mode operations. The parameters
5182 have the same meaning as similarly named ones for the `init_libfuncs'
5183 routine. (See above). */
5186 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
5188 int maxsize
= 2*BITS_PER_WORD
;
5189 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5190 maxsize
= LONG_LONG_TYPE_SIZE
;
5191 init_libfuncs (optable
, word_mode
,
5192 mode_for_size (maxsize
, MODE_INT
, 0),
5196 /* Initialize the libfunc fields of an entire group of entries in some
5197 optab which correspond to all real mode operations. The parameters
5198 have the same meaning as similarly named ones for the `init_libfuncs'
5199 routine. (See above). */
5202 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
5204 char *dec_opname
= alloca (sizeof (DECIMAL_PREFIX
) + strlen (opname
));
5206 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5207 depending on the low level floating format used. */
5208 memcpy (dec_opname
, DECIMAL_PREFIX
, sizeof (DECIMAL_PREFIX
) - 1);
5209 strcpy (dec_opname
+ sizeof (DECIMAL_PREFIX
) - 1, opname
);
5211 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
5212 init_libfuncs (optable
, MIN_MODE_DECIMAL_FLOAT
, MAX_MODE_DECIMAL_FLOAT
,
5213 dec_opname
, suffix
);
5216 /* Initialize the libfunc fields of an entire group of entries of an
5217 inter-mode-class conversion optab. The string formation rules are
5218 similar to the ones for init_libfuncs, above, but instead of having
5219 a mode name and an operand count these functions have two mode names
5220 and no operand count. */
5222 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5223 enum mode_class from_class
,
5224 enum mode_class to_class
)
5226 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
5227 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
5228 size_t opname_len
= strlen (opname
);
5229 size_t max_mname_len
= 0;
5231 enum machine_mode fmode
, tmode
;
5232 const char *fname
, *tname
;
5234 char *libfunc_name
, *suffix
;
5235 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5238 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5239 depends on which underlying decimal floating point format is used. */
5240 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5242 for (fmode
= first_from_mode
;
5244 fmode
= GET_MODE_WIDER_MODE (fmode
))
5245 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
5247 for (tmode
= first_to_mode
;
5249 tmode
= GET_MODE_WIDER_MODE (tmode
))
5250 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
5252 nondec_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5253 nondec_name
[0] = '_';
5254 nondec_name
[1] = '_';
5255 memcpy (&nondec_name
[2], opname
, opname_len
);
5256 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5258 dec_name
= alloca (2 + dec_len
+ opname_len
+ 2*max_mname_len
+ 1 + 1);
5261 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5262 memcpy (&dec_name
[2+dec_len
], opname
, opname_len
);
5263 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5265 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
5266 fmode
= GET_MODE_WIDER_MODE (fmode
))
5267 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
5268 tmode
= GET_MODE_WIDER_MODE (tmode
))
5270 fname
= GET_MODE_NAME (fmode
);
5271 tname
= GET_MODE_NAME (tmode
);
5273 if (DECIMAL_FLOAT_MODE_P(fmode
) || DECIMAL_FLOAT_MODE_P(tmode
))
5275 libfunc_name
= dec_name
;
5276 suffix
= dec_suffix
;
5280 libfunc_name
= nondec_name
;
5281 suffix
= nondec_suffix
;
5285 for (q
= fname
; *q
; p
++, q
++)
5287 for (q
= tname
; *q
; p
++, q
++)
5292 tab
->handlers
[tmode
][fmode
].libfunc
5293 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5298 /* Initialize the libfunc fields of an entire group of entries of an
5299 intra-mode-class conversion optab. The string formation rules are
5300 similar to the ones for init_libfunc, above. WIDENING says whether
5301 the optab goes from narrow to wide modes or vice versa. These functions
5302 have two mode names _and_ an operand count. */
5304 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
5305 enum mode_class
class, bool widening
)
5307 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
5308 size_t opname_len
= strlen (opname
);
5309 size_t max_mname_len
= 0;
5311 enum machine_mode nmode
, wmode
;
5312 const char *nname
, *wname
;
5314 char *nondec_name
, *dec_name
, *nondec_suffix
, *dec_suffix
;
5315 char *libfunc_name
, *suffix
;
5318 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5319 depends on which underlying decimal floating point format is used. */
5320 const size_t dec_len
= sizeof (DECIMAL_PREFIX
) - 1;
5322 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5323 nmode
= GET_MODE_WIDER_MODE (nmode
))
5324 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
5326 nondec_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
5327 nondec_name
[0] = '_';
5328 nondec_name
[1] = '_';
5329 memcpy (&nondec_name
[2], opname
, opname_len
);
5330 nondec_suffix
= nondec_name
+ opname_len
+ 2;
5332 dec_name
= alloca (2 + dec_len
+ opname_len
+ 2*max_mname_len
+ 1 + 1);
5335 memcpy (&dec_name
[2], DECIMAL_PREFIX
, dec_len
);
5336 memcpy (&dec_name
[2 + dec_len
], opname
, opname_len
);
5337 dec_suffix
= dec_name
+ dec_len
+ opname_len
+ 2;
5339 for (nmode
= first_mode
; nmode
!= VOIDmode
;
5340 nmode
= GET_MODE_WIDER_MODE (nmode
))
5341 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
5342 wmode
= GET_MODE_WIDER_MODE (wmode
))
5344 nname
= GET_MODE_NAME (nmode
);
5345 wname
= GET_MODE_NAME (wmode
);
5347 if (DECIMAL_FLOAT_MODE_P(nmode
) || DECIMAL_FLOAT_MODE_P(wmode
))
5349 libfunc_name
= dec_name
;
5350 suffix
= dec_suffix
;
5354 libfunc_name
= nondec_name
;
5355 suffix
= nondec_suffix
;
5359 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
5361 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
5367 tab
->handlers
[widening
? wmode
: nmode
]
5368 [widening
? nmode
: wmode
].libfunc
5369 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
5376 init_one_libfunc (const char *name
)
5380 /* Create a FUNCTION_DECL that can be passed to
5381 targetm.encode_section_info. */
5382 /* ??? We don't have any type information except for this is
5383 a function. Pretend this is "int foo()". */
5384 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5385 build_function_type (integer_type_node
, NULL_TREE
));
5386 DECL_ARTIFICIAL (decl
) = 1;
5387 DECL_EXTERNAL (decl
) = 1;
5388 TREE_PUBLIC (decl
) = 1;
5390 symbol
= XEXP (DECL_RTL (decl
), 0);
5392 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5393 are the flags assigned by targetm.encode_section_info. */
5394 SET_SYMBOL_REF_DECL (symbol
, 0);
5399 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5400 MODE to NAME, which should be either 0 or a string constant. */
5402 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
5405 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
5407 optable
->handlers
[mode
].libfunc
= 0;
5410 /* Call this to reset the function entry for one conversion optab
5411 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5412 either 0 or a string constant. */
5414 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
5415 enum machine_mode fmode
, const char *name
)
5418 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
5420 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
5423 /* Call this once to initialize the contents of the optabs
5424 appropriately for the current target machine. */
5430 enum machine_mode int_mode
;
5432 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5434 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5435 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5437 #ifdef HAVE_conditional_move
5438 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5439 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5442 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5444 vcond_gen_code
[i
] = CODE_FOR_nothing
;
5445 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
5448 add_optab
= init_optab (PLUS
);
5449 addv_optab
= init_optabv (PLUS
);
5450 sub_optab
= init_optab (MINUS
);
5451 subv_optab
= init_optabv (MINUS
);
5452 smul_optab
= init_optab (MULT
);
5453 smulv_optab
= init_optabv (MULT
);
5454 smul_highpart_optab
= init_optab (UNKNOWN
);
5455 umul_highpart_optab
= init_optab (UNKNOWN
);
5456 smul_widen_optab
= init_optab (UNKNOWN
);
5457 umul_widen_optab
= init_optab (UNKNOWN
);
5458 usmul_widen_optab
= init_optab (UNKNOWN
);
5459 smadd_widen_optab
= init_optab (UNKNOWN
);
5460 umadd_widen_optab
= init_optab (UNKNOWN
);
5461 smsub_widen_optab
= init_optab (UNKNOWN
);
5462 umsub_widen_optab
= init_optab (UNKNOWN
);
5463 sdiv_optab
= init_optab (DIV
);
5464 sdivv_optab
= init_optabv (DIV
);
5465 sdivmod_optab
= init_optab (UNKNOWN
);
5466 udiv_optab
= init_optab (UDIV
);
5467 udivmod_optab
= init_optab (UNKNOWN
);
5468 smod_optab
= init_optab (MOD
);
5469 umod_optab
= init_optab (UMOD
);
5470 fmod_optab
= init_optab (UNKNOWN
);
5471 remainder_optab
= init_optab (UNKNOWN
);
5472 ftrunc_optab
= init_optab (UNKNOWN
);
5473 and_optab
= init_optab (AND
);
5474 ior_optab
= init_optab (IOR
);
5475 xor_optab
= init_optab (XOR
);
5476 ashl_optab
= init_optab (ASHIFT
);
5477 ashr_optab
= init_optab (ASHIFTRT
);
5478 lshr_optab
= init_optab (LSHIFTRT
);
5479 rotl_optab
= init_optab (ROTATE
);
5480 rotr_optab
= init_optab (ROTATERT
);
5481 smin_optab
= init_optab (SMIN
);
5482 smax_optab
= init_optab (SMAX
);
5483 umin_optab
= init_optab (UMIN
);
5484 umax_optab
= init_optab (UMAX
);
5485 pow_optab
= init_optab (UNKNOWN
);
5486 atan2_optab
= init_optab (UNKNOWN
);
5488 /* These three have codes assigned exclusively for the sake of
5490 mov_optab
= init_optab (SET
);
5491 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5492 cmp_optab
= init_optab (COMPARE
);
5494 ucmp_optab
= init_optab (UNKNOWN
);
5495 tst_optab
= init_optab (UNKNOWN
);
5497 eq_optab
= init_optab (EQ
);
5498 ne_optab
= init_optab (NE
);
5499 gt_optab
= init_optab (GT
);
5500 ge_optab
= init_optab (GE
);
5501 lt_optab
= init_optab (LT
);
5502 le_optab
= init_optab (LE
);
5503 unord_optab
= init_optab (UNORDERED
);
5505 neg_optab
= init_optab (NEG
);
5506 negv_optab
= init_optabv (NEG
);
5507 abs_optab
= init_optab (ABS
);
5508 absv_optab
= init_optabv (ABS
);
5509 addcc_optab
= init_optab (UNKNOWN
);
5510 one_cmpl_optab
= init_optab (NOT
);
5511 bswap_optab
= init_optab (BSWAP
);
5512 ffs_optab
= init_optab (FFS
);
5513 clz_optab
= init_optab (CLZ
);
5514 ctz_optab
= init_optab (CTZ
);
5515 popcount_optab
= init_optab (POPCOUNT
);
5516 parity_optab
= init_optab (PARITY
);
5517 sqrt_optab
= init_optab (SQRT
);
5518 floor_optab
= init_optab (UNKNOWN
);
5519 ceil_optab
= init_optab (UNKNOWN
);
5520 round_optab
= init_optab (UNKNOWN
);
5521 btrunc_optab
= init_optab (UNKNOWN
);
5522 nearbyint_optab
= init_optab (UNKNOWN
);
5523 rint_optab
= init_optab (UNKNOWN
);
5524 sincos_optab
= init_optab (UNKNOWN
);
5525 sin_optab
= init_optab (UNKNOWN
);
5526 asin_optab
= init_optab (UNKNOWN
);
5527 cos_optab
= init_optab (UNKNOWN
);
5528 acos_optab
= init_optab (UNKNOWN
);
5529 exp_optab
= init_optab (UNKNOWN
);
5530 exp10_optab
= init_optab (UNKNOWN
);
5531 exp2_optab
= init_optab (UNKNOWN
);
5532 expm1_optab
= init_optab (UNKNOWN
);
5533 ldexp_optab
= init_optab (UNKNOWN
);
5534 scalb_optab
= init_optab (UNKNOWN
);
5535 logb_optab
= init_optab (UNKNOWN
);
5536 ilogb_optab
= init_optab (UNKNOWN
);
5537 log_optab
= init_optab (UNKNOWN
);
5538 log10_optab
= init_optab (UNKNOWN
);
5539 log2_optab
= init_optab (UNKNOWN
);
5540 log1p_optab
= init_optab (UNKNOWN
);
5541 tan_optab
= init_optab (UNKNOWN
);
5542 atan_optab
= init_optab (UNKNOWN
);
5543 copysign_optab
= init_optab (UNKNOWN
);
5545 isinf_optab
= init_optab (UNKNOWN
);
5547 strlen_optab
= init_optab (UNKNOWN
);
5548 cbranch_optab
= init_optab (UNKNOWN
);
5549 cmov_optab
= init_optab (UNKNOWN
);
5550 cstore_optab
= init_optab (UNKNOWN
);
5551 push_optab
= init_optab (UNKNOWN
);
5553 reduc_smax_optab
= init_optab (UNKNOWN
);
5554 reduc_umax_optab
= init_optab (UNKNOWN
);
5555 reduc_smin_optab
= init_optab (UNKNOWN
);
5556 reduc_umin_optab
= init_optab (UNKNOWN
);
5557 reduc_splus_optab
= init_optab (UNKNOWN
);
5558 reduc_uplus_optab
= init_optab (UNKNOWN
);
5560 ssum_widen_optab
= init_optab (UNKNOWN
);
5561 usum_widen_optab
= init_optab (UNKNOWN
);
5562 sdot_prod_optab
= init_optab (UNKNOWN
);
5563 udot_prod_optab
= init_optab (UNKNOWN
);
5565 vec_extract_optab
= init_optab (UNKNOWN
);
5566 vec_extract_even_optab
= init_optab (UNKNOWN
);
5567 vec_extract_odd_optab
= init_optab (UNKNOWN
);
5568 vec_interleave_high_optab
= init_optab (UNKNOWN
);
5569 vec_interleave_low_optab
= init_optab (UNKNOWN
);
5570 vec_set_optab
= init_optab (UNKNOWN
);
5571 vec_init_optab
= init_optab (UNKNOWN
);
5572 vec_shl_optab
= init_optab (UNKNOWN
);
5573 vec_shr_optab
= init_optab (UNKNOWN
);
5574 vec_realign_load_optab
= init_optab (UNKNOWN
);
5575 movmisalign_optab
= init_optab (UNKNOWN
);
5576 vec_widen_umult_hi_optab
= init_optab (UNKNOWN
);
5577 vec_widen_umult_lo_optab
= init_optab (UNKNOWN
);
5578 vec_widen_smult_hi_optab
= init_optab (UNKNOWN
);
5579 vec_widen_smult_lo_optab
= init_optab (UNKNOWN
);
5580 vec_unpacks_hi_optab
= init_optab (UNKNOWN
);
5581 vec_unpacks_lo_optab
= init_optab (UNKNOWN
);
5582 vec_unpacku_hi_optab
= init_optab (UNKNOWN
);
5583 vec_unpacku_lo_optab
= init_optab (UNKNOWN
);
5584 vec_unpacks_float_hi_optab
= init_optab (UNKNOWN
);
5585 vec_unpacks_float_lo_optab
= init_optab (UNKNOWN
);
5586 vec_unpacku_float_hi_optab
= init_optab (UNKNOWN
);
5587 vec_unpacku_float_lo_optab
= init_optab (UNKNOWN
);
5588 vec_pack_trunc_optab
= init_optab (UNKNOWN
);
5589 vec_pack_usat_optab
= init_optab (UNKNOWN
);
5590 vec_pack_ssat_optab
= init_optab (UNKNOWN
);
5591 vec_pack_ufix_trunc_optab
= init_optab (UNKNOWN
);
5592 vec_pack_sfix_trunc_optab
= init_optab (UNKNOWN
);
5594 powi_optab
= init_optab (UNKNOWN
);
5597 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5598 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5599 trunc_optab
= init_convert_optab (TRUNCATE
);
5600 sfix_optab
= init_convert_optab (FIX
);
5601 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5602 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5603 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5604 sfloat_optab
= init_convert_optab (FLOAT
);
5605 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5606 lrint_optab
= init_convert_optab (UNKNOWN
);
5607 lround_optab
= init_convert_optab (UNKNOWN
);
5608 lfloor_optab
= init_convert_optab (UNKNOWN
);
5609 lceil_optab
= init_convert_optab (UNKNOWN
);
5611 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5613 movmem_optab
[i
] = CODE_FOR_nothing
;
5614 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5615 cmpstrn_optab
[i
] = CODE_FOR_nothing
;
5616 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5617 setmem_optab
[i
] = CODE_FOR_nothing
;
5619 sync_add_optab
[i
] = CODE_FOR_nothing
;
5620 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5621 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5622 sync_and_optab
[i
] = CODE_FOR_nothing
;
5623 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5624 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5625 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5626 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5627 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5628 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5629 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5630 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5631 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5632 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5633 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5634 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5635 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5636 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5637 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5638 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5639 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5640 sync_lock_release
[i
] = CODE_FOR_nothing
;
5642 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5645 /* Fill in the optabs with the insns we support. */
5648 /* The ffs function operates on `int'. Fall back on it if we do not
5649 have a libgcc2 function for that width. */
5650 int_mode
= mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0);
5651 ffs_optab
->handlers
[(int) int_mode
].libfunc
= init_one_libfunc ("ffs");
5653 /* Initialize the optabs with the names of the library functions. */
5654 init_integral_libfuncs (add_optab
, "add", '3');
5655 init_floating_libfuncs (add_optab
, "add", '3');
5656 init_integral_libfuncs (addv_optab
, "addv", '3');
5657 init_floating_libfuncs (addv_optab
, "add", '3');
5658 init_integral_libfuncs (sub_optab
, "sub", '3');
5659 init_floating_libfuncs (sub_optab
, "sub", '3');
5660 init_integral_libfuncs (subv_optab
, "subv", '3');
5661 init_floating_libfuncs (subv_optab
, "sub", '3');
5662 init_integral_libfuncs (smul_optab
, "mul", '3');
5663 init_floating_libfuncs (smul_optab
, "mul", '3');
5664 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5665 init_floating_libfuncs (smulv_optab
, "mul", '3');
5666 init_integral_libfuncs (sdiv_optab
, "div", '3');
5667 init_floating_libfuncs (sdiv_optab
, "div", '3');
5668 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5669 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5670 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5671 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5672 init_integral_libfuncs (smod_optab
, "mod", '3');
5673 init_integral_libfuncs (umod_optab
, "umod", '3');
5674 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5675 init_integral_libfuncs (and_optab
, "and", '3');
5676 init_integral_libfuncs (ior_optab
, "ior", '3');
5677 init_integral_libfuncs (xor_optab
, "xor", '3');
5678 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5679 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5680 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5681 init_integral_libfuncs (smin_optab
, "min", '3');
5682 init_floating_libfuncs (smin_optab
, "min", '3');
5683 init_integral_libfuncs (smax_optab
, "max", '3');
5684 init_floating_libfuncs (smax_optab
, "max", '3');
5685 init_integral_libfuncs (umin_optab
, "umin", '3');
5686 init_integral_libfuncs (umax_optab
, "umax", '3');
5687 init_integral_libfuncs (neg_optab
, "neg", '2');
5688 init_floating_libfuncs (neg_optab
, "neg", '2');
5689 init_integral_libfuncs (negv_optab
, "negv", '2');
5690 init_floating_libfuncs (negv_optab
, "neg", '2');
5691 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5692 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5693 init_integral_libfuncs (clz_optab
, "clz", '2');
5694 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5695 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5696 init_integral_libfuncs (parity_optab
, "parity", '2');
5698 /* Comparison libcalls for integers MUST come in pairs,
5700 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5701 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5702 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5704 /* EQ etc are floating point only. */
5705 init_floating_libfuncs (eq_optab
, "eq", '2');
5706 init_floating_libfuncs (ne_optab
, "ne", '2');
5707 init_floating_libfuncs (gt_optab
, "gt", '2');
5708 init_floating_libfuncs (ge_optab
, "ge", '2');
5709 init_floating_libfuncs (lt_optab
, "lt", '2');
5710 init_floating_libfuncs (le_optab
, "le", '2');
5711 init_floating_libfuncs (unord_optab
, "unord", '2');
5713 init_floating_libfuncs (powi_optab
, "powi", '2');
5716 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5717 MODE_INT
, MODE_FLOAT
);
5718 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5719 MODE_INT
, MODE_DECIMAL_FLOAT
);
5720 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5721 MODE_INT
, MODE_FLOAT
);
5722 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5723 MODE_INT
, MODE_DECIMAL_FLOAT
);
5724 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5725 MODE_FLOAT
, MODE_INT
);
5726 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5727 MODE_DECIMAL_FLOAT
, MODE_INT
);
5728 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5729 MODE_FLOAT
, MODE_INT
);
5730 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5731 MODE_DECIMAL_FLOAT
, MODE_INT
);
5732 init_interclass_conv_libfuncs (ufloat_optab
, "floatuns",
5733 MODE_INT
, MODE_DECIMAL_FLOAT
);
5734 init_interclass_conv_libfuncs (lrint_optab
, "lrint",
5735 MODE_INT
, MODE_FLOAT
);
5736 init_interclass_conv_libfuncs (lround_optab
, "lround",
5737 MODE_INT
, MODE_FLOAT
);
5738 init_interclass_conv_libfuncs (lfloor_optab
, "lfloor",
5739 MODE_INT
, MODE_FLOAT
);
5740 init_interclass_conv_libfuncs (lceil_optab
, "lceil",
5741 MODE_INT
, MODE_FLOAT
);
5743 /* sext_optab is also used for FLOAT_EXTEND. */
5744 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5745 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, true);
5746 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5747 init_interclass_conv_libfuncs (sext_optab
, "extend", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5748 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5749 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, false);
5750 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, MODE_DECIMAL_FLOAT
);
5751 init_interclass_conv_libfuncs (trunc_optab
, "trunc", MODE_DECIMAL_FLOAT
, MODE_FLOAT
);
5753 /* Explicitly initialize the bswap libfuncs since we need them to be
5754 valid for things other than word_mode. */
5755 set_optab_libfunc (bswap_optab
, SImode
, "__bswapsi2");
5756 set_optab_libfunc (bswap_optab
, DImode
, "__bswapdi2");
5758 /* Use cabs for double complex abs, since systems generally have cabs.
5759 Don't define any libcall for float complex, so that cabs will be used. */
5760 if (complex_double_type_node
)
5761 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5762 = init_one_libfunc ("cabs");
5764 abort_libfunc
= init_one_libfunc ("abort");
5765 memcpy_libfunc
= init_one_libfunc ("memcpy");
5766 memmove_libfunc
= init_one_libfunc ("memmove");
5767 memcmp_libfunc
= init_one_libfunc ("memcmp");
5768 memset_libfunc
= init_one_libfunc ("memset");
5769 setbits_libfunc
= init_one_libfunc ("__setbits");
5771 #ifndef DONT_USE_BUILTIN_SETJMP
5772 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5773 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5775 setjmp_libfunc
= init_one_libfunc ("setjmp");
5776 longjmp_libfunc
= init_one_libfunc ("longjmp");
5778 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5779 unwind_sjlj_unregister_libfunc
5780 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5782 /* For function entry/exit instrumentation. */
5783 profile_function_entry_libfunc
5784 = init_one_libfunc ("__cyg_profile_func_enter");
5785 profile_function_exit_libfunc
5786 = init_one_libfunc ("__cyg_profile_func_exit");
5788 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5790 if (HAVE_conditional_trap
)
5791 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5793 /* Allow the target to add more libcalls or rename some, etc. */
5794 targetm
.init_libfuncs ();
5799 /* Print information about the current contents of the optabs on
5803 debug_optab_libfuncs (void)
5809 /* Dump the arithmetic optabs. */
5810 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5811 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5814 struct optab_handlers
*h
;
5817 h
= &o
->handlers
[j
];
5820 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5821 fprintf (stderr
, "%s\t%s:\t%s\n",
5822 GET_RTX_NAME (o
->code
),
5824 XSTR (h
->libfunc
, 0));
5828 /* Dump the conversion optabs. */
5829 for (i
= 0; i
< (int) COI_MAX
; ++i
)
5830 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5831 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5834 struct optab_handlers
*h
;
5836 o
= &convert_optab_table
[i
];
5837 h
= &o
->handlers
[j
][k
];
5840 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5841 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5842 GET_RTX_NAME (o
->code
),
5845 XSTR (h
->libfunc
, 0));
5853 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5854 CODE. Return 0 on failure. */
5857 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5858 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5860 enum machine_mode mode
= GET_MODE (op1
);
5861 enum insn_code icode
;
5864 if (!HAVE_conditional_trap
)
5867 if (mode
== VOIDmode
)
5870 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5871 if (icode
== CODE_FOR_nothing
)
5875 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5876 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5882 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5884 PUT_CODE (trap_rtx
, code
);
5885 gcc_assert (HAVE_conditional_trap
);
5886 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5890 insn
= get_insns ();
5897 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5898 or unsigned operation code. */
5900 static enum rtx_code
5901 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5913 code
= unsignedp
? LTU
: LT
;
5916 code
= unsignedp
? LEU
: LE
;
5919 code
= unsignedp
? GTU
: GT
;
5922 code
= unsignedp
? GEU
: GE
;
5925 case UNORDERED_EXPR
:
5956 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5957 unsigned operators. Do not generate compare instruction. */
5960 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5962 enum rtx_code rcode
;
5964 rtx rtx_op0
, rtx_op1
;
5966 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5967 ensures that condition is a relational operation. */
5968 gcc_assert (COMPARISON_CLASS_P (cond
));
5970 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5971 t_op0
= TREE_OPERAND (cond
, 0);
5972 t_op1
= TREE_OPERAND (cond
, 1);
5974 /* Expand operands. */
5975 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)),
5977 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)),
5980 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5981 && GET_MODE (rtx_op0
) != VOIDmode
)
5982 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5984 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5985 && GET_MODE (rtx_op1
) != VOIDmode
)
5986 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5988 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5991 /* Return insn code for VEC_COND_EXPR EXPR. */
5993 static inline enum insn_code
5994 get_vcond_icode (tree expr
, enum machine_mode mode
)
5996 enum insn_code icode
= CODE_FOR_nothing
;
5998 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5999 icode
= vcondu_gen_code
[mode
];
6001 icode
= vcond_gen_code
[mode
];
6005 /* Return TRUE iff, appropriate vector insns are available
6006 for vector cond expr expr in VMODE mode. */
6009 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
6011 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
6016 /* Generate insns for VEC_COND_EXPR. */
6019 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
6021 enum insn_code icode
;
6022 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
6023 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
6024 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
6026 icode
= get_vcond_icode (vec_cond_expr
, mode
);
6027 if (icode
== CODE_FOR_nothing
)
6030 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6031 target
= gen_reg_rtx (mode
);
6033 /* Get comparison rtx. First expand both cond expr operands. */
6034 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
6036 cc_op0
= XEXP (comparison
, 0);
6037 cc_op1
= XEXP (comparison
, 1);
6038 /* Expand both operands and force them in reg, if required. */
6039 rtx_op1
= expand_normal (TREE_OPERAND (vec_cond_expr
, 1));
6040 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
6041 && mode
!= VOIDmode
)
6042 rtx_op1
= force_reg (mode
, rtx_op1
);
6044 rtx_op2
= expand_normal (TREE_OPERAND (vec_cond_expr
, 2));
6045 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
6046 && mode
!= VOIDmode
)
6047 rtx_op2
= force_reg (mode
, rtx_op2
);
6049 /* Emit instruction! */
6050 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
6051 comparison
, cc_op0
, cc_op1
));
6057 /* This is an internal subroutine of the other compare_and_swap expanders.
6058 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6059 operation. TARGET is an optional place to store the value result of
6060 the operation. ICODE is the particular instruction to expand. Return
6061 the result of the operation. */
6064 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
6065 rtx target
, enum insn_code icode
)
6067 enum machine_mode mode
= GET_MODE (mem
);
6070 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6071 target
= gen_reg_rtx (mode
);
6073 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
6074 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
6075 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
6076 old_val
= force_reg (mode
, old_val
);
6078 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
6079 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
6080 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
6081 new_val
= force_reg (mode
, new_val
);
6083 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
6084 if (insn
== NULL_RTX
)
6091 /* Expand a compare-and-swap operation and return its value. */
6094 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
6096 enum machine_mode mode
= GET_MODE (mem
);
6097 enum insn_code icode
= sync_compare_and_swap
[mode
];
6099 if (icode
== CODE_FOR_nothing
)
6102 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
6105 /* Expand a compare-and-swap operation and store true into the result if
6106 the operation was successful and false otherwise. Return the result.
6107 Unlike other routines, TARGET is not optional. */
6110 expand_bool_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
6112 enum machine_mode mode
= GET_MODE (mem
);
6113 enum insn_code icode
;
6114 rtx subtarget
, label0
, label1
;
6116 /* If the target supports a compare-and-swap pattern that simultaneously
6117 sets some flag for success, then use it. Otherwise use the regular
6118 compare-and-swap and follow that immediately with a compare insn. */
6119 icode
= sync_compare_and_swap_cc
[mode
];
6123 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
6125 if (subtarget
!= NULL_RTX
)
6129 case CODE_FOR_nothing
:
6130 icode
= sync_compare_and_swap
[mode
];
6131 if (icode
== CODE_FOR_nothing
)
6134 /* Ensure that if old_val == mem, that we're not comparing
6135 against an old value. */
6136 if (MEM_P (old_val
))
6137 old_val
= force_reg (mode
, old_val
);
6139 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
6141 if (subtarget
== NULL_RTX
)
6144 emit_cmp_insn (subtarget
, old_val
, EQ
, const0_rtx
, mode
, true);
6147 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6148 setcc instruction from the beginning. We don't work too hard here,
6149 but it's nice to not be stupid about initial code gen either. */
6150 if (STORE_FLAG_VALUE
== 1)
6152 icode
= setcc_gen_code
[EQ
];
6153 if (icode
!= CODE_FOR_nothing
)
6155 enum machine_mode cmode
= insn_data
[icode
].operand
[0].mode
;
6159 if (!insn_data
[icode
].operand
[0].predicate (target
, cmode
))
6160 subtarget
= gen_reg_rtx (cmode
);
6162 insn
= GEN_FCN (icode
) (subtarget
);
6166 if (GET_MODE (target
) != GET_MODE (subtarget
))
6168 convert_move (target
, subtarget
, 1);
6176 /* Without an appropriate setcc instruction, use a set of branches to
6177 get 1 and 0 stored into target. Presumably if the target has a
6178 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6180 label0
= gen_label_rtx ();
6181 label1
= gen_label_rtx ();
6183 emit_jump_insn (bcc_gen_fctn
[EQ
] (label0
));
6184 emit_move_insn (target
, const0_rtx
);
6185 emit_jump_insn (gen_jump (label1
));
6187 emit_label (label0
);
6188 emit_move_insn (target
, const1_rtx
);
6189 emit_label (label1
);
6194 /* This is a helper function for the other atomic operations. This function
6195 emits a loop that contains SEQ that iterates until a compare-and-swap
6196 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6197 a set of instructions that takes a value from OLD_REG as an input and
6198 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6199 set to the current contents of MEM. After SEQ, a compare-and-swap will
6200 attempt to update MEM with NEW_REG. The function returns true when the
6201 loop was generated successfully. */
6204 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
6206 enum machine_mode mode
= GET_MODE (mem
);
6207 enum insn_code icode
;
6208 rtx label
, cmp_reg
, subtarget
;
6210 /* The loop we want to generate looks like
6216 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6217 if (cmp_reg != old_reg)
6220 Note that we only do the plain load from memory once. Subsequent
6221 iterations use the value loaded by the compare-and-swap pattern. */
6223 label
= gen_label_rtx ();
6224 cmp_reg
= gen_reg_rtx (mode
);
6226 emit_move_insn (cmp_reg
, mem
);
6228 emit_move_insn (old_reg
, cmp_reg
);
6232 /* If the target supports a compare-and-swap pattern that simultaneously
6233 sets some flag for success, then use it. Otherwise use the regular
6234 compare-and-swap and follow that immediately with a compare insn. */
6235 icode
= sync_compare_and_swap_cc
[mode
];
6239 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
6241 if (subtarget
!= NULL_RTX
)
6243 gcc_assert (subtarget
== cmp_reg
);
6248 case CODE_FOR_nothing
:
6249 icode
= sync_compare_and_swap
[mode
];
6250 if (icode
== CODE_FOR_nothing
)
6253 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
6255 if (subtarget
== NULL_RTX
)
6257 if (subtarget
!= cmp_reg
)
6258 emit_move_insn (cmp_reg
, subtarget
);
6260 emit_cmp_insn (cmp_reg
, old_reg
, EQ
, const0_rtx
, mode
, true);
6263 /* ??? Mark this jump predicted not taken? */
6264 emit_jump_insn (bcc_gen_fctn
[NE
] (label
));
6269 /* This function generates the atomic operation MEM CODE= VAL. In this
6270 case, we do not care about any resulting value. Returns NULL if we
6271 cannot generate the operation. */
6274 expand_sync_operation (rtx mem
, rtx val
, enum rtx_code code
)
6276 enum machine_mode mode
= GET_MODE (mem
);
6277 enum insn_code icode
;
6280 /* Look to see if the target supports the operation directly. */
6284 icode
= sync_add_optab
[mode
];
6287 icode
= sync_ior_optab
[mode
];
6290 icode
= sync_xor_optab
[mode
];
6293 icode
= sync_and_optab
[mode
];
6296 icode
= sync_nand_optab
[mode
];
6300 icode
= sync_sub_optab
[mode
];
6301 if (icode
== CODE_FOR_nothing
)
6303 icode
= sync_add_optab
[mode
];
6304 if (icode
!= CODE_FOR_nothing
)
6306 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
6316 /* Generate the direct operation, if present. */
6317 if (icode
!= CODE_FOR_nothing
)
6319 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6320 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6321 if (!insn_data
[icode
].operand
[1].predicate (val
, mode
))
6322 val
= force_reg (mode
, val
);
6324 insn
= GEN_FCN (icode
) (mem
, val
);
6332 /* Failing that, generate a compare-and-swap loop in which we perform the
6333 operation with normal arithmetic instructions. */
6334 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6336 rtx t0
= gen_reg_rtx (mode
), t1
;
6343 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
6346 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
6347 true, OPTAB_LIB_WIDEN
);
6349 insn
= get_insns ();
6352 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
6359 /* This function generates the atomic operation MEM CODE= VAL. In this
6360 case, we do care about the resulting value: if AFTER is true then
6361 return the value MEM holds after the operation, if AFTER is false
6362 then return the value MEM holds before the operation. TARGET is an
6363 optional place for the result value to be stored. */
6366 expand_sync_fetch_operation (rtx mem
, rtx val
, enum rtx_code code
,
6367 bool after
, rtx target
)
6369 enum machine_mode mode
= GET_MODE (mem
);
6370 enum insn_code old_code
, new_code
, icode
;
6374 /* Look to see if the target supports the operation directly. */
6378 old_code
= sync_old_add_optab
[mode
];
6379 new_code
= sync_new_add_optab
[mode
];
6382 old_code
= sync_old_ior_optab
[mode
];
6383 new_code
= sync_new_ior_optab
[mode
];
6386 old_code
= sync_old_xor_optab
[mode
];
6387 new_code
= sync_new_xor_optab
[mode
];
6390 old_code
= sync_old_and_optab
[mode
];
6391 new_code
= sync_new_and_optab
[mode
];
6394 old_code
= sync_old_nand_optab
[mode
];
6395 new_code
= sync_new_nand_optab
[mode
];
6399 old_code
= sync_old_sub_optab
[mode
];
6400 new_code
= sync_new_sub_optab
[mode
];
6401 if (old_code
== CODE_FOR_nothing
&& new_code
== CODE_FOR_nothing
)
6403 old_code
= sync_old_add_optab
[mode
];
6404 new_code
= sync_new_add_optab
[mode
];
6405 if (old_code
!= CODE_FOR_nothing
|| new_code
!= CODE_FOR_nothing
)
6407 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
6417 /* If the target does supports the proper new/old operation, great. But
6418 if we only support the opposite old/new operation, check to see if we
6419 can compensate. In the case in which the old value is supported, then
6420 we can always perform the operation again with normal arithmetic. In
6421 the case in which the new value is supported, then we can only handle
6422 this in the case the operation is reversible. */
6427 if (icode
== CODE_FOR_nothing
)
6430 if (icode
!= CODE_FOR_nothing
)
6437 if (icode
== CODE_FOR_nothing
6438 && (code
== PLUS
|| code
== MINUS
|| code
== XOR
))
6441 if (icode
!= CODE_FOR_nothing
)
6446 /* If we found something supported, great. */
6447 if (icode
!= CODE_FOR_nothing
)
6449 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6450 target
= gen_reg_rtx (mode
);
6452 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6453 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6454 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
6455 val
= force_reg (mode
, val
);
6457 insn
= GEN_FCN (icode
) (target
, mem
, val
);
6462 /* If we need to compensate for using an operation with the
6463 wrong return value, do so now. */
6470 else if (code
== MINUS
)
6475 target
= expand_simple_unop (mode
, NOT
, target
, NULL_RTX
, true);
6476 target
= expand_simple_binop (mode
, code
, target
, val
, NULL_RTX
,
6477 true, OPTAB_LIB_WIDEN
);
6484 /* Failing that, generate a compare-and-swap loop in which we perform the
6485 operation with normal arithmetic instructions. */
6486 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6488 rtx t0
= gen_reg_rtx (mode
), t1
;
6490 if (!target
|| !register_operand (target
, mode
))
6491 target
= gen_reg_rtx (mode
);
6496 emit_move_insn (target
, t0
);
6500 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
6503 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
6504 true, OPTAB_LIB_WIDEN
);
6506 emit_move_insn (target
, t1
);
6508 insn
= get_insns ();
6511 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
6518 /* This function expands a test-and-set operation. Ideally we atomically
6519 store VAL in MEM and return the previous value in MEM. Some targets
6520 may not support this operation and only support VAL with the constant 1;
6521 in this case while the return value will be 0/1, but the exact value
6522 stored in MEM is target defined. TARGET is an option place to stick
6523 the return value. */
6526 expand_sync_lock_test_and_set (rtx mem
, rtx val
, rtx target
)
6528 enum machine_mode mode
= GET_MODE (mem
);
6529 enum insn_code icode
;
6532 /* If the target supports the test-and-set directly, great. */
6533 icode
= sync_lock_test_and_set
[mode
];
6534 if (icode
!= CODE_FOR_nothing
)
6536 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6537 target
= gen_reg_rtx (mode
);
6539 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6540 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6541 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
6542 val
= force_reg (mode
, val
);
6544 insn
= GEN_FCN (icode
) (target
, mem
, val
);
6552 /* Otherwise, use a compare-and-swap loop for the exchange. */
6553 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6555 if (!target
|| !register_operand (target
, mode
))
6556 target
= gen_reg_rtx (mode
);
6557 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6558 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6559 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6566 #include "gt-optabs.h"