/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "tree-hasher.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "basic-block.h"

struct target_optabs default_target_optabs;
struct target_libfuncs default_target_libfuncs;
struct target_optabs *this_fn_optabs = &default_target_optabs;
#if SWITCHABLE_TARGET
struct target_optabs *this_target_optabs = &default_target_optabs;
struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
#endif

#define libfunc_hash \
  (this_target_libfuncs->x_libfunc_hash)

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   enum machine_mode *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif

/* Used for libfunc_hash.  */

hashval_t
libfunc_hasher::hash (libfunc_entry *e)
{
  return ((e->mode1 + e->mode2 * NUM_MACHINE_MODES) ^ e->op);
}

/* Used for libfunc_hash.  */

bool
libfunc_hasher::equal (libfunc_entry *e1, libfunc_entry *e2)
{
  return e1->op == e2->op && e1->mode1 == e2->mode1 && e1->mode2 == e2->mode2;
}

/* Return the libfunc corresponding to the operation defined by OPTAB,
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
   return NULL if no libfunc is available.  */

rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
		       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  /* ??? This ought to be an assert, but not all of the places
     that we expand optabs know about the optabs that got moved
     to being direct.  */
  if (!(optab >= FIRST_CONV_OPTAB && optab <= LAST_CONVLIB_OPTAB))
    return NULL_RTX;

  e.op = optab;
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = libfunc_hash->find_slot (&e, NO_INSERT);
  if (!slot)
    {
      const struct convert_optab_libcall_d *d
	= &convlib_def[optab - FIRST_CONV_OPTAB];

      if (d->libcall_gen == NULL)
	return NULL;

      d->libcall_gen (optab, d->libcall_basename, mode1, mode2);
      slot = libfunc_hash->find_slot (&e, NO_INSERT);
      if (!slot)
	return NULL;
    }
  return (*slot)->libfunc;
}

/* Return the libfunc corresponding to the operation defined by OPTAB in
   MODE.  Trigger lazy initialization if needed; return NULL if no libfunc
   is available.  */

rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  /* ??? This ought to be an assert, but not all of the places
     that we expand optabs know about the optabs that got moved
     to being direct.  */
  if (!(optab >= FIRST_NORM_OPTAB && optab <= LAST_NORMLIB_OPTAB))
    return NULL_RTX;

  e.op = optab;
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = libfunc_hash->find_slot (&e, NO_INSERT);
  if (!slot)
    {
      const struct optab_libcall_d *d
	= &normlib_def[optab - FIRST_NORM_OPTAB];

      if (d->libcall_gen == NULL)
	return NULL;

      d->libcall_gen (optab, d->libcall_basename, d->libcall_suffix, mode);
      slot = libfunc_hash->find_slot (&e, NO_INSERT);
      if (!slot)
	return NULL;
    }
  return (*slot)->libfunc;
}

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static enum machine_mode
widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
{
  enum machine_mode m0 = GET_MODE (op0);
  enum machine_mode m1 = GET_MODE (op1);
  enum machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}

/* Like optab_handler, but for widening_operations that have a
   TO_MODE and a FROM_MODE.  */

enum insn_code
widening_optab_handler (optab op, enum machine_mode to_mode,
			enum machine_mode from_mode)
{
  unsigned scode = (op << 16) | to_mode;
  if (to_mode != from_mode && from_mode != VOIDmode)
    {
      /* ??? Why does find_widening_optab_handler_and_mode attempt to
	 widen things that can't be widened?  E.g. add_optab... */
      if (op > LAST_CONV_OPTAB)
	return CODE_FOR_nothing;
      scode |= from_mode << 8;
    }
  return raw_optab_handler (scode);
}

/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->SI insn, then return SI->DI, if that exists.
   If PERMIT_NON_WIDENING is non-zero then this can be used with
   non-widening optabs also.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
				      enum machine_mode from_mode,
				      int permit_non_widening,
				      enum machine_mode *found_mode)
{
  for (; (permit_non_widening || from_mode != to_mode)
	 && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
	 && from_mode != VOIDmode;
       from_mode = GET_MODE_WIDER_MODE (from_mode))
    {
      enum insn_code handler = widening_optab_handler (op, to_mode,
						       from_mode);

      if (handler != CODE_FOR_nothing)
	{
	  if (found_mode)
	    *found_mode = from_mode;
	  return handler;
	}
    }

  return CODE_FOR_nothing;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by the tree code
   CODE and the tree EXP.  This function is not always usable (for example, it
   cannot give complete results for multiplication or division) but probably
   ought to be relied on more widely throughout the expander.  */

optab
optab_for_tree_code (enum tree_code code, const_tree type,
		     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return vrotl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return vrotr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmadd_widen_optab : umadd_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmsub_widen_optab : umsub_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmsub_widen_optab : smsub_widen_optab));

    case FMA_EXPR:
      return fma_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_even_optab : vec_widen_smult_even_optab;

    case VEC_WIDEN_MULT_ODD_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return unknown_optab;
    }
}

/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
				nops	OP0	OP1	WIDE_OP
   widening-sum			2	oprnd0	-	oprnd1
   widening-dot-product		3	oprnd0	oprnd1	oprnd2
   widening-mult		2	oprnd0	oprnd1	-
   type-promotion (vec-unpack)	1	oprnd0	-	-  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}

/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (sepops ops, rtx target)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (ops->type);
  tree vec_oprnd = ops->op0;
  tree shift_oprnd = ops->op1;
  optab shift_optab;

  switch (ops->code)
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (shift_optab, mode);
  gcc_assert (icode != CODE_FOR_nothing);

  rtx_op1 = expand_normal (vec_oprnd);
  rtx_op2 = expand_normal (shift_oprnd);

  create_output_operand (&eops[0], target, mode);
  create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
  create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
  expand_insn (icode, 3, eops);

  return eops[0].value;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (enum machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}

#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;

      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;

      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx_insn *start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					  cmp_code, cmp1, cmp2,
					  outof_input, into_input,
					  op1, superword_op1,
					  outof_target, into_target,
					  unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	 *		      [__op1_high_|__op1_low__]
	 _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	   _______________________
    (3)	  [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (product != NULL_RTX)
    {
      product_high = operand_subword (product, high, 1, mode);
      adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			     NULL_RTX, 0, OPTAB_DIRECT);
      emit_move_insn (product_high, adjust);
    }
  return product;
}

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
    case SS_ASHIFT:
    case US_ASHIFT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  enum machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
						      from_mode, 1);
  enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  enum machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx pat;
  rtx xop0 = op0, xop1 = op1;
  rtx swap;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    {
      swap = xop0;
      xop0 = xop1;
      xop1 = swap;
    }

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    {
      swap = xop1;
      xop1 = xop0;
      xop0 = swap;
    }

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
	  && ! add_equal_note (as_a <rtx_insn *> (pat), ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

1519 /* Generate code to perform an operation specified by BINOPTAB
1520 on operands OP0 and OP1, with result having machine-mode MODE.
1522 UNSIGNEDP is for the case where we have to widen the operands
1523 to perform the operation. It says to use zero-extension.
1525 If TARGET is nonzero, the value
1526 is generated there, if it is convenient to do so.
1527 In all cases an rtx is returned for the locus of the value;
1528 this may or may not be TARGET. */
1531 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1532 rtx target
, int unsignedp
, enum optab_methods methods
)
1534 enum optab_methods next_methods
1535 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1536 ? OPTAB_WIDEN
: methods
);
1537 enum mode_class mclass
;
1538 enum machine_mode wider_mode
;
1541 rtx_insn
*entry_last
= get_last_insn ();
1544 mclass
= GET_MODE_CLASS (mode
);
1546 /* If subtracting an integer constant, convert this into an addition of
1547 the negated constant. */
1549 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1551 op1
= negate_rtx (mode
, op1
);
1552 binoptab
= add_optab
;
1555 /* Record where to delete back to if we backtrack. */
1556 last
= get_last_insn ();
1558 /* If we can do it with a three-operand insn, do so. */
1560 if (methods
!= OPTAB_MUST_WIDEN
1561 && find_widening_optab_handler (binoptab
, mode
,
1562 widened_mode (mode
, op0
, op1
), 1)
1563 != CODE_FOR_nothing
)
1565 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1566 unsignedp
, methods
, last
);
1571 /* If we were trying to rotate, and that didn't work, try rotating
1572 the other direction before falling back to shifts and bitwise-or. */
1573 if (((binoptab
== rotl_optab
1574 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1575 || (binoptab
== rotr_optab
1576 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1577 && mclass
== MODE_INT
)
1579 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1581 unsigned int bits
= GET_MODE_PRECISION (mode
);
1583 if (CONST_INT_P (op1
))
1584 newop1
= GEN_INT (bits
- INTVAL (op1
));
1585 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1586 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1588 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1589 gen_int_mode (bits
, GET_MODE (op1
)), op1
,
1590 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1592 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1593 target
, unsignedp
, methods
, last
);
1598 /* If this is a multiply, see if we can do a widening operation that
1599 takes operands of this mode and makes a wider mode. */
1601 if (binoptab
== smul_optab
1602 && GET_MODE_2XWIDER_MODE (mode
) != VOIDmode
1603 && (widening_optab_handler ((unsignedp
? umul_widen_optab
1604 : smul_widen_optab
),
1605 GET_MODE_2XWIDER_MODE (mode
), mode
)
1606 != CODE_FOR_nothing
))
1608 temp
= expand_binop (GET_MODE_2XWIDER_MODE (mode
),
1609 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1610 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1614 if (GET_MODE_CLASS (mode
) == MODE_INT
1615 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (temp
)))
1616 return gen_lowpart (mode
, temp
);
1618 return convert_to_mode (mode
, temp
, unsignedp
);
1622 /* If this is a vector shift by a scalar, see if we can do a vector
1623 shift by a vector. If so, broadcast the scalar into a vector. */
1624 if (mclass
== MODE_VECTOR_INT
)
1626 optab otheroptab
= unknown_optab
;
1628 if (binoptab
== ashl_optab
)
1629 otheroptab
= vashl_optab
;
1630 else if (binoptab
== ashr_optab
)
1631 otheroptab
= vashr_optab
;
1632 else if (binoptab
== lshr_optab
)
1633 otheroptab
= vlshr_optab
;
1634 else if (binoptab
== rotl_optab
)
1635 otheroptab
= vrotl_optab
;
1636 else if (binoptab
== rotr_optab
)
1637 otheroptab
= vrotr_optab
;
1639 if (otheroptab
&& optab_handler (otheroptab
, mode
) != CODE_FOR_nothing
)
1641 rtx vop1
= expand_vector_broadcast (mode
, op1
);
1644 temp
= expand_binop_directly (mode
, otheroptab
, op0
, vop1
,
1645 target
, unsignedp
, methods
, last
);
1652 /* Look for a wider mode of the same class for which we think we
1653 can open-code the operation. Check for a widening multiply at the
1654 wider mode as well. */
1656 if (CLASS_HAS_WIDER_MODES_P (mclass
)
1657 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1658 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1659 wider_mode
!= VOIDmode
;
1660 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1662 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
1663 || (binoptab
== smul_optab
1664 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1665 && (find_widening_optab_handler ((unsignedp
1667 : smul_widen_optab
),
1668 GET_MODE_WIDER_MODE (wider_mode
),
1670 != CODE_FOR_nothing
)))
1672 rtx xop0
= op0
, xop1
= op1
;
1675 /* For certain integer operations, we need not actually extend
1676 the narrow operands, as long as we will truncate
1677 the results to the same narrowness. */
1679 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1680 || binoptab
== xor_optab
1681 || binoptab
== add_optab
|| binoptab
== sub_optab
1682 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1683 && mclass
== MODE_INT
)
1686 xop0
= avoid_expensive_constant (mode
, binoptab
, 0,
1688 if (binoptab
!= ashl_optab
)
1689 xop1
= avoid_expensive_constant (mode
, binoptab
, 1,
1693 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1695 /* The second operand of a shift must always be extended. */
1696 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1697 no_extend
&& binoptab
!= ashl_optab
);
1699 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1700 unsignedp
, OPTAB_DIRECT
);
1703 if (mclass
!= MODE_INT
1704 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1707 target
= gen_reg_rtx (mode
);
1708 convert_move (target
, temp
, 0);
1712 return gen_lowpart (mode
, temp
);
1715 delete_insns_since (last
);
1719 /* If operation is commutative,
1720 try to make the first operand a register.
1721 Even better, try to make it the same as the target.
1722 Also try to make the last operand a constant. */
1723 if (commutative_optab_p (binoptab
)
1724 && swap_commutative_operands_with_target (target
, op0
, op1
))
1731 /* These can be done a word at a time. */
1732 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1733 && mclass
== MODE_INT
1734 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1735 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1740 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1741 won't be accurate, so use a new target. */
1745 || !valid_multiword_target_p (target
))
1746 target
= gen_reg_rtx (mode
);
1750 /* Do the actual arithmetic. */
1751 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1753 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1754 rtx x
= expand_binop (word_mode
, binoptab
,
1755 operand_subword_force (op0
, i
, mode
),
1756 operand_subword_force (op1
, i
, mode
),
1757 target_piece
, unsignedp
, next_methods
);
1762 if (target_piece
!= x
)
1763 emit_move_insn (target_piece
, x
);
1766 insns
= get_insns ();
1769 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1776 /* Synthesize double word shifts from single word shifts. */
1777 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1778 || binoptab
== ashr_optab
)
1779 && mclass
== MODE_INT
1780 && (CONST_INT_P (op1
) || optimize_insn_for_speed_p ())
1781 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1782 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
)
1783 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
1784 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1785 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1787 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1788 enum machine_mode op1_mode
;
1790 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1791 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1792 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1794 /* Apply the truncation to constant shifts. */
1795 if (double_shift_mask
> 0 && CONST_INT_P (op1
))
1796 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1798 if (op1
== CONST0_RTX (op1_mode
))
1801 /* Make sure that this is a combination that expand_doubleword_shift
1802 can handle. See the comments there for details. */
1803 if (double_shift_mask
== 0
1804 || (shift_mask
== BITS_PER_WORD
- 1
1805 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1808 rtx into_target
, outof_target
;
1809 rtx into_input
, outof_input
;
1810 int left_shift
, outof_word
;
1812 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1813 won't be accurate, so use a new target. */
1817 || !valid_multiword_target_p (target
))
1818 target
= gen_reg_rtx (mode
);
1822 /* OUTOF_* is the word we are shifting bits away from, and
1823 INTO_* is the word that we are shifting bits towards, thus
1824 they differ depending on the direction of the shift and
1825 WORDS_BIG_ENDIAN. */
1827 left_shift
= binoptab
== ashl_optab
;
1828 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1830 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1831 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1833 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1834 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1836 if (expand_doubleword_shift (op1_mode
, binoptab
,
1837 outof_input
, into_input
, op1
,
1838 outof_target
, into_target
,
1839 unsignedp
, next_methods
, shift_mask
))
1841 insns
= get_insns ();
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx inter;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
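
/* Illustrative sketch only, not part of the compiler, and kept under
   #if 0 so it is never compiled: the rotate decomposition above,
   written out for a 64-bit rotl held as two 32-bit words.  Assumes
   BITS_PER_WORD == 32, little-endian word order (op[0] is the low
   word), and 0 < count < 64; the function name is hypothetical.  */
#if 0
static void
rotl_doubleword (const unsigned int op[2], unsigned int res[2], int count)
{
  /* For rotl on a little-endian target, OUTOF is the low word (bits
     leave it for the high word) and INTO is the high word.  */
  unsigned int outof_input = op[0], into_input = op[1];

  if (count == 32)
    {
      /* This is just a word swap.  */
      res[1] = outof_input;
      res[0] = into_input;
      return;
    }

  /* The two shift counts chosen exactly as in the code above.  */
  int first = count > 32 ? count - 32 : 32 - count;
  int second = count > 32 ? 64 - count : count;

  if (count < 32)
    {
      res[1] = (outof_input >> first) | (into_input << second);
      res[0] = (into_input >> first) | (outof_input << second);
    }
  else
    {
      res[1] = (outof_input << first) | (into_input >> second);
      res[0] = (into_input << first) | (outof_input >> second);
    }
}
#endif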
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
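
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the carry-propagation scheme above for a two-word addition, assuming
   32-bit words and a carry normalized to 1.  The name is hypothetical.  */
#if 0
static void
add_doubleword (const unsigned int a[2], const unsigned int b[2],
                unsigned int sum[2])
{
  /* Main add of the low words; an unsigned overflow occurred iff the
     result is smaller than an input, which is what the LT store-flag
     above detects.  */
  sum[0] = a[0] + b[0];
  unsigned int carry = sum[0] < a[0];

  /* High words, plus the carry from below.  */
  sum[1] = a[1] + b[1] + carry;
}
#endif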
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && widening_optab_handler (smul_widen_optab, mode, word_mode)
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_dst_reg_note (temp,
                                REG_EQUAL,
                                gen_rtx_fmt_ee (MULT, mode,
                                                copy_rtx (op0),
                                                copy_rtx (op1)),
                                target ? target : product);
            }
          return product;
        }
    }
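
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   a double-word multiply built from word-mode operations, in the spirit
   of expand_doubleword_mult with a widening multiply.  Assumes 32-bit
   words; the name is hypothetical.  */
#if 0
static unsigned long long
mul_doubleword (unsigned long long u, unsigned long long v)
{
  unsigned int u_lo = (unsigned int) u, u_hi = (unsigned int) (u >> 32);
  unsigned int v_lo = (unsigned int) v, v_hi = (unsigned int) (v >> 32);

  /* One widening multiply for the low product; the cross products only
     affect the high word, so plain word-mode multiplies suffice.  */
  unsigned long long prod = (unsigned long long) u_lo * v_lo;
  unsigned int high = (unsigned int) (prod >> 32)
                      + u_lo * v_hi + u_hi * v_lo;

  return ((unsigned long long) high << 32) | (unsigned int) prod;
}
#endif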
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
        {
          op1_mode = targetm.libgcc_shift_count_mode ();
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (op1_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
                            gen_rtx_fmt_ee (optab_to_code (binoptab),
                                            mode, op0, op1),
                            trapv_binoptab_p (binoptab));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
                  != CODE_FOR_nothing
              || (methods == OPTAB_LIB
                  && optab_libfunc (binoptab, wider_mode)))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && mclass == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (mclass != MODE_INT
                      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
                       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  enum machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
        return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  enum machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
        return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
static rtx
widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode,
                                    unoptab != clrsb_optab, false);
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unoptab != clrsb_optab);
              if (temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
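
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the widening identity above in plain C, assuming a 16-bit operand,
   a 32-bit wider mode, and x != 0 (clz is undefined at zero).  */
#if 0
static int
clz16_via_clz32 (unsigned short x)
{
  /* clz:16 (x) == clz:32 (zero_extend (x)) - (32 - 16).  */
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}
#endif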
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
                       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
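
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the double-word clz above for 64 bits made of two 32-bit words
   (x != 0 assumed, as for __builtin_clzll).  */
#if 0
static int
clz_doubleword (unsigned int hi, unsigned int lo)
{
  if (hi != 0)
    /* clz of the full value is clz of the high word.  */
    return __builtin_clz (hi);
  /* Else it is clz of the low word plus the width of a word.  */
  return __builtin_clz (lo) + 32;
}
#endif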
/* Try calculating
        (bswap:narrow x)
   as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx x;
  rtx_insn *last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
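
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the widening identity above for a 16-bit bswap done in 32 bits.  */
#if 0
static unsigned short
bswap16_via_bswap32 (unsigned short x)
{
  /* bswap:16 (x) == bswap:32 (zero_extend (x)) >> (32 - 16).  */
  return (unsigned short) (__builtin_bswap32 (x) >> 16);
}
#endif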
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
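
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   a 64-bit bswap from two 32-bit bswaps, as arranged above.  Assumes
   32-bit words; the name is hypothetical.  */
#if 0
static unsigned long long
bswap64_via_bswap32 (unsigned long long x)
{
  /* Byte-swap each 32-bit half, then swap the halves.  */
  unsigned int lo = (unsigned int) x;
  unsigned int hi = (unsigned int) (x >> 32);
  return ((unsigned long long) __builtin_bswap32 (lo) << 32)
         | __builtin_bswap32 (hi);
}
#endif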
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
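
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the identity used above, parity (x) == popcount (x) & 1.  */
#if 0
static int
parity_via_popcount (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}
#endif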
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
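
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the ctz-from-clz identity above, for a 32-bit value with x != 0.  */
#if 0
static int
ctz_via_clz (unsigned int x)
{
  /* x & -x isolates the lowest set bit; clz of that gives K - ctz,
     where K = precision - 1 = 31 here.  */
  return 31 - __builtin_clz (x & -x);
}
#endif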
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
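
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the ffs contract above, built on a ctz whose value at zero is not
   usable, so the zero case is tested explicitly.  */
#if 0
static int
ffs_via_ctz (unsigned int x)
{
  /* ffs must return 0 for 0; ctz is undefined there, so test first,
     then add 1 to get the 1-based index of the lowest set bit.  */
  return x == 0 ? 0 : __builtin_ctz (x) + 1;
}
#endif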
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_wide_int_const (mask, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_wide_int_const (mask, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
                        target);
    }

  return target;
}
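
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   abs/neg of an IEEE single via its sign bit, as the expansion above
   does with AND/XOR on the integer image.  Assumes 32-bit float and
   int with matching endianness; memcpy is the usual type-punning
   route in plain C.  */
#if 0
#include <string.h>
static float
float_absneg_bit (float x, int is_abs)
{
  unsigned int bits;
  memcpy (&bits, &x, sizeof bits);
  if (is_abs)
    bits &= ~(1u << 31);        /* AND clears the sign bit for abs.  */
  else
    bits ^= 1u << 31;           /* XOR flips it for neg.  */
  memcpy (&x, &bits, sizeof bits);
  return x;
}
#endif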
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
                    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
              && ! add_equal_note (as_a <rtx_insn *> (pat), ops[0].value,
                                   optab_to_code (unoptab),
                                   ops[1].value, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return ops[0].value;
        }
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_clz (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;
      goto try_libcall;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
         or ROTATERT.  First try these directly; if this fails, then try the
         obvious pair of shifts with allowed widening, as this will probably
         be always more efficient than the other fallback methods.  */
      if (mode == HImode)
        {
          rtx_insn *last;
          rtx temp1, temp2;

          if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          last = get_last_insn ();

          temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          if (temp1 && temp2)
            {
              temp = expand_binop (mode, ior_optab, temp1, temp2, target,
                                   unsignedp, OPTAB_WIDEN);
              if (temp)
                return temp;
            }

          delete_insns_since (last);
        }

      temp = widen_bswap (mode, op0, target);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_bswap (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
          {
            rtx xop0 = op0;
            rtx_insn *last = get_last_insn ();

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && mclass == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == clrsb_optab || unoptab == popcount_optab
          || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
                                          optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
      if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
        eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
      else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
        eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
      emit_libcall_block_1 (insns, target, value, eq_value,
                            trapv_unoptab_p (unoptab));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
              || optab_libfunc (unoptab, wider_mode))
            {
              rtx xop0 = op0;
              rtx_insn *last = get_last_insn ();

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab
                                     || unoptab == bswap_optab)
                                    && mclass == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  Similarly for clrsb.  */
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
                  && temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);

              /* Likewise for bswap.  */
              if (unoptab == bswap_optab && temp != 0)
                {
                  gcc_assert (GET_MODE_PRECISION (wider_mode)
                              == GET_MODE_BITSIZE (wider_mode)
                              && GET_MODE_PRECISION (mode)
                                 == GET_MODE_BITSIZE (mode));

                  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
                                       GET_MODE_BITSIZE (wider_mode)
                                       - GET_MODE_BITSIZE (mode),
                                       NULL_RTX, true);
                }

              if (temp)
                {
                  if (mclass != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                          op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
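
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the branch-free absolute value above, for a 32-bit int.  Assumes an
   arithmetic right shift of a negative int (implementation-defined in
   ISO C, but what this expansion relies on) and x != INT_MIN.  */
#if 0
static int
abs_branchless (int x)
{
  /* The shift yields 0 for x >= 0 and -1 for x < 0; XOR then
     subtract conditionally negates.  */
  int mask = x >> (sizeof (int) * 8 - 1);
  return (mask ^ x) - mask;
}
#endif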
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL_RTX, op1, -1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (op1);

  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
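
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the one's complement absolute value above, for a 32-bit int, with
   the same arithmetic-shift assumption as the abs sketch.  */
#if 0
static int
one_cmpl_abs_branchless (int x)
{
  /* (x >> (W-1)) ^ x gives ~x for negative x and x otherwise.  */
  return (x >> (sizeof (int) * 8 - 1)) ^ x;
}
#endif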
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          imode = int_mode_for_mode (mode);
          if (imode == BLKmode)
            return NULL_RTX;
          op1 = gen_lowpart (imode, op1);
        }
      else
        {
          int word;

          imode = word_mode;
          if (FLOAT_WORDS_BIG_ENDIAN)
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
          else
            word = bitpos / BITS_PER_WORD;
          bitpos = bitpos % BITS_PER_WORD;
          op1 = operand_subword_force (op1, word, mode);
        }

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
                           immed_wide_int_const (mask, imode),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece
                  = expand_binop (imode, and_optab, op0_piece,
                                  immed_wide_int_const (~mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_wide_int_const (mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_wide_int_const (mask, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_wide_int_const (~mask, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
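
/* Illustrative sketch only, not part of the compiler, kept under #if 0:
   the bitmask copysign above for an IEEE double viewed as a 64-bit
   integer image (the memcpy type punning is an assumption of this
   sketch, not something the RTL expansion needs).  */
#if 0
#include <string.h>
static double
copysign_bit (double x, double y)
{
  unsigned long long xb, yb;
  memcpy (&xb, &x, sizeof xb);
  memcpy (&yb, &y, sizeof yb);
  /* AND away x's sign bit, AND out y's sign bit, IOR them together.  */
  xb = (xb & ~(1ull << 63)) | (yb & (1ull << 63));
  memcpy (&x, &xb, sizeof xb);
  return x;
}
#endif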
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
                      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (as_a <rtx_insn *> (pat), ops[0].value, code, ops[1].value,
                    NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list, or if it sets a result (presumably a hard
              register) that is set or clobbered by a previous insn.
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
              SET_DEST perform the former check on the address, and the latter
              check on the MEM.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_in_p (SET_DEST (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
                      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
            if (note)
              {
                int lp_nr = INTVAL (XEXP (note, 0));
                if (lp_nr == 0 || lp_nr == INT_MIN)
                  remove_note (insn, note);
              }
          }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
         reg note to indicate that this call cannot throw or execute a nonlocal
         goto (unless there is already a REG_EH_REGION note, in which case
         we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                SET_PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
                        target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        return 1;
      if (purpose == ccp_store_flag
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 1, test))
        return 1;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
4049 /* This function is called when we are going to emit a compare instruction that
4050 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4052 *PMODE is the mode of the inputs (in case they are const_int).
4053 *PUNSIGNEDP nonzero says that the operands are unsigned;
4054 this matters if they need to be widened (as given by METHODS).
4056 If they have mode BLKmode, then SIZE specifies the size of both operands.
4058 This function performs all the setup necessary so that the caller only has
4059 to emit a single comparison insn. This setup can involve doing a BLKmode
4060 comparison or emitting a library call to perform the comparison if no insn
4061 is available to handle it.
4062 The values which are passed in through pointers can be modified; the caller
4063 should perform the comparison on the modified values. Constant
4064 comparisons must have already been folded. */
static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, enum machine_mode *pmode)
{
  enum machine_mode mode = *pmode;
  rtx libfunc, test;
  enum machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((CONST_INT_P (size)
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);
      x = result;
      y = const0_rtx;
      mode = result_mode;
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
      *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      enum machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.
	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
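/* Illustrative sketch (editorial; OP0, OP1 and the mode are hypothetical):
   a caller typically lets prepare_cmp_insn do all of the setup and then
   hands the result to a cbranch or cstore expander:

     rtx test;
     enum machine_mode cmp_mode = SImode;
     prepare_cmp_insn (op0, op1, LT, NULL_RTX, 0, OPTAB_LIB_WIDEN,
		       &test, &cmp_mode);

   On success TEST is a comparison rtx such as (lt (reg:SI 60) (reg:SI 61))
   whose operands are already valid for the target's compare insn, and
   CMP_MODE is the (possibly widened) mode actually used.  */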
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      if (reload_completed)
	return NULL_RTX;
      x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
    }

  return x;
}
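/* For instance (sketch, hypothetical operands): widening a QImode pseudo X
   so it can serve as operand 1 of an SImode cbranch is spelled

     rtx op0 = prepare_operand (icode, x, 1, QImode, SImode, 0);

   which returns an SImode rtx, copying into a fresh pseudo if the operand
   predicate rejects the converted value, or NULL_RTX once reload has
   completed and no new pseudos may be created.  */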
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label, int prob)
{
  enum machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					  XEXP (test, 1), label));
  if (prob != -1
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_int_reg_note (insn, REG_BR_PROB, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label,
			 int prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
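/* Usage sketch (editorial; A, B and LABEL are hypothetical): the expansion
   of "if (a > b) goto label" on SImode values is simply

     emit_cmp_and_jump_insns (a, b, GT, NULL_RTX, SImode, 0, label, -1);

   where PROB == -1 means no REG_BR_PROB note is attached to the jump.  */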
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, enum machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  rtx tmp;
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
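/* For example (sketch): an LT comparison of two DFmode values on a
   soft-float target becomes a call to the DFmode "lt" libfunc
   (conventionally "__ltdf2"), and *PTEST tests the call's result against
   zero, e.g. (lt (reg:SI <res>) (const_int 0)), with *PMODE set to
   targetm.libgcc_cmp_return_mode ().  */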
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  struct expand_operand ops[1];

  create_address_operand (&ops[0], loc);
  expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode cmode, rtx op2, rtx op3,
		       enum machine_mode mode, int unsignedp)
{
  rtx tem, comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  saved_pending_stack_adjust save;
  save_pending_stack_adjust (&save);
  last = get_last_insn ();
  do_pending_stack_adjust ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }

  delete_insns_since (last);
  restore_pending_stack_adjust (&save);
  return NULL_RTX;
}
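/* Usage sketch (editorial; X, Y, A, B are hypothetical pseudos): expanding
   "target = (x < y) ? a : b" in SImode on a target with a movsicc pattern:

     rtx r = emit_conditional_move (target, LT, x, y, SImode,
				    a, b, SImode, 0);

   R is TARGET on success and NULL_RTX when no conditional move could be
   emitted, in which case the caller falls back to a branch sequence.  */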
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
{
  rtx tem, comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
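/* This mirrors emit_conditional_move above; e.g. (sketch) a target with an
   add<mode>cc pattern can expand "target = (x < y) ? a + b : a" without a
   branch by passing A as OP2 and B as OP3.  */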
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add R1 and C,
   storing the result in R0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
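/* For instance (sketch): for an SImode pseudo X, gen_add2_insn (x, y)
   returns the body of the target's add insn applied as
   (set (reg:SI x) (plus:SI (reg:SI x) y)); have_add2_insn is the
   non-asserting predicate callers use when the operands might not
   match.  */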
/* Generate and return an insn body to add Y and Z,
   storing the result in X.  */

rtx
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract C from R1,
   storing the result in R0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx_insn *
gen_move_insn (rtx x, rtx y)
{
  rtx_insn *seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode);
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
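/* Example (sketch): can_fix_p (SImode, DFmode, 0, &truncp) returns the
   code of a fix_truncdfsi2-style pattern with *TRUNCP_PTR == 0 when the
   target has one; if only a non-truncating fix pattern plus an ftruncdf2
   pattern exist, it returns that code with *TRUNCP_PTR == 1, telling the
   caller to emit the explicit FTRUNC first.  */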
enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode);
}
/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 is code of vector operation to be used when
   vectorizing the operation, if available.
   - DECL is decl of target builtin functions to be used
   when vectorizing the operation, if available.  In this case,
   CODE1 is CALL_EXPR.  */

bool
supportable_convert_operation (enum tree_code code,
			       tree vectype_out, tree vectype_in,
			       tree *decl, enum tree_code *code1)
{
  enum machine_mode m1,m2;
  int truncp;

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  /* First check if we can do the conversion directly.  */
  if ((code == FIX_TRUNC_EXPR
       && can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
	  != CODE_FOR_nothing)
      || (code == FLOAT_EXPR
	  && can_float_p (m1,m2,TYPE_UNSIGNED (vectype_in))
	     != CODE_FOR_nothing))
    {
      *code1 = code;
      return true;
    }

  /* Now check for builtin.  */
  if (targetm.vectorize.builtin_conversion
      && targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
    {
      *code1 = CALL_EXPR;
      *decl = targetm.vectorize.builtin_conversion (code, vectype_out,
						    vectype_in);
      return true;
    }

  return false;
}
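/* E.g. (sketch): for CODE == FLOAT_EXPR converting a vector of integers
   to the matching vector of floats, a target with a direct vector float
   pattern yields *CODE1 == FLOAT_EXPR, while a target providing only a
   builtin yields *CODE1 == CALL_EXPR with *DECL set to that builtin.  */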
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp && can_do_signed)
    {
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	  break;

      if (fmode == VOIDmode)
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = GET_MODE (to);

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (GET_MODE (from)))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = GET_MODE (from);
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
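/* A worked example of the unsigned correction above (sketch): converting
   the unsigned SImode value 0x80000000 via the signed path first yields
   -2147483648.0; adding 2**32 == 4294967296.0 produces 2147483648.0,
   the intended unsigned value.  */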
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for other inputs overflow happens and the result is undefined),
     so we know that the most important bit set in the mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
	  && (!DECIMAL_FLOAT_MODE_P (fmode)
	      || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit;
	  rtx_code_label *lab1, *lab2;
	  rtx_insn *insn;

	  bitsize = GET_MODE_PRECISION (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1, fmode);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_dst_reg_note (insn, REG_EQUAL,
				gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
					       copy_rtx (from)),
				to);
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
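/* Worked example of the unsigned trick above (sketch, 64-bit TO): for an
   input in [2**63, 2**64), subtracting the limit 2**63 merely clears the
   top bit and is exact, the signed conversion then fits, and XORing the
   integer result with 1 << 63 restores the bias, so no rounding occurs on
   either path.  */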
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  enum machine_mode to_mode = GET_MODE (to);
  enum machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.
   MODE is the mode to generate for.  */

static void
gen_libfunc (optab optable, const char *opname, int suffix,
	     enum machine_mode mode)
{
  unsigned opname_len = strlen (opname);
  const char *mname = GET_MODE_NAME (mode);
  unsigned mname_len = strlen (mname);
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
  int len = prefix_len + opname_len + mname_len + 1 + 1;
  char *libfunc_name = XALLOCAVEC (char, len);
  char *p;
  const char *q;

  p = libfunc_name;
  *p++ = '_';
  *p++ = '_';
  if (targetm.libfunc_gnu_prefix)
    {
      *p++ = 'g';
      *p++ = 'n';
      *p++ = 'u';
      *p++ = '_';
    }
  for (q = opname; *q; )
    *p++ = *q++;
  for (q = mname; *q; q++)
    *p++ = TOLOWER (*q);
  *p++ = suffix;
  *p = '\0';

  set_optab_libfunc (optable, mode,
		     ggc_alloc_string (libfunc_name, p - libfunc_name));
}
/* Like gen_libfunc, but verify that integer operation is involved.  */

void
gen_int_libfunc (optab optable, const char *opname, char suffix,
		 enum machine_mode mode)
{
  int maxsize = 2 * BITS_PER_WORD;
  int minsize = BITS_PER_WORD;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  if (minsize > INT_TYPE_SIZE
      && (trapv_binoptab_p (optable)
	  || trapv_unoptab_p (optable)))
    minsize = INT_TYPE_SIZE;
  if (GET_MODE_BITSIZE (mode) < minsize
      || GET_MODE_BITSIZE (mode) > maxsize)
    return;
  gen_libfunc (optable, opname, suffix, mode);
}
/* Like gen_libfunc, but verify that FP and set decimal prefix if needed.  */

void
gen_fp_libfunc (optab optable, const char *opname, char suffix,
		enum machine_mode mode)
{
  char *dec_opname;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_libfunc (optable, opname, suffix, mode);
  if (DECIMAL_FLOAT_MODE_P (mode))
    {
      dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
      /* For BID support, change the name to have either a bid_ or dpd_ prefix
	 depending on the low level floating format used.  */
      memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
      strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
      gen_libfunc (optable, dec_opname, suffix, mode);
    }
}
/* Like gen_libfunc, but verify that fixed-point operation is involved.  */

void
gen_fixed_libfunc (optab optable, const char *opname, char suffix,
		   enum machine_mode mode)
{
  if (!ALL_FIXED_POINT_MODE_P (mode))
    return;
  gen_libfunc (optable, opname, suffix, mode);
}

/* Like gen_libfunc, but verify that signed fixed-point operation is
   involved.  */

void
gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
			  enum machine_mode mode)
{
  if (!SIGNED_FIXED_POINT_MODE_P (mode))
    return;
  gen_libfunc (optable, opname, suffix, mode);
}

/* Like gen_libfunc, but verify that unsigned fixed-point operation is
   involved.  */

void
gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
			    enum machine_mode mode)
{
  if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
    return;
  gen_libfunc (optable, opname, suffix, mode);
}
/* Like gen_libfunc, but verify that FP or INT operation is involved.  */

void
gen_int_fp_libfunc (optab optable, const char *name, char suffix,
		    enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
}
/* Like gen_libfunc, but verify that FP or INT operation is involved
   and add 'v' suffix for integer operation.  */

void
gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
		     enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (GET_MODE_CLASS (mode) == MODE_INT)
    {
      int len = strlen (name);
      char *v_name = XALLOCAVEC (char, len + 2);
      strcpy (v_name, name);
      v_name[len] = 'v';
      v_name[len + 1] = 0;
      gen_int_libfunc (optable, v_name, suffix, mode);
    }
}
/* Like gen_libfunc, but verify that FP or INT or FIXED operation is
   involved.  */

void
gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
			  enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
  if (ALL_FIXED_POINT_MODE_P (mode))
    gen_fixed_libfunc (optable, name, suffix, mode);
}

/* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
   involved.  */

void
gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
				 enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
    gen_fp_libfunc (optable, name, suffix, mode);
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
  if (SIGNED_FIXED_POINT_MODE_P (mode))
    gen_signed_fixed_libfunc (optable, name, suffix, mode);
}

/* Like gen_libfunc, but verify that INT or FIXED operation is
   involved.  */

void
gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
		       enum machine_mode mode)
{
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
  if (ALL_FIXED_POINT_MODE_P (mode))
    gen_fixed_libfunc (optable, name, suffix, mode);
}

/* Like gen_libfunc, but verify that INT or signed FIXED operation is
   involved.  */

void
gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
			      enum machine_mode mode)
{
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
  if (SIGNED_FIXED_POINT_MODE_P (mode))
    gen_signed_fixed_libfunc (optable, name, suffix, mode);
}

/* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
   involved.  */

void
gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
				enum machine_mode mode)
{
  if (INTEGRAL_MODE_P (mode))
    gen_int_libfunc (optable, name, suffix, mode);
  if (UNSIGNED_FIXED_POINT_MODE_P (mode))
    gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
gen_interclass_conv_libfunc (convert_optab tab,
			     const char *opname,
			     enum machine_mode tmode,
			     enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
  char *libfunc_name, *suffix;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  if (targetm.libfunc_gnu_prefix)
    {
      nondec_name[2] = 'g';
      nondec_name[3] = 'n';
      nondec_name[4] = 'u';
      nondec_name[5] = '_';
    }

  memcpy (&nondec_name[prefix_len], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + prefix_len;

  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2+dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  if (DECIMAL_FLOAT_MODE_P (fmode) || DECIMAL_FLOAT_MODE_P (tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
		    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
/* Same as gen_interclass_conv_libfunc but verify that we are producing
   int->fp conversion.  */

static void
gen_int_to_fp_conv_libfunc (convert_optab tab,
			    const char *opname,
			    enum machine_mode tmode,
			    enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_INT)
    return;
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* ufloat_optab is special by using floatun for FP and floatuns decimal fp
   naming scheme.  */

static void
gen_ufloat_conv_libfunc (convert_optab tab,
			 const char *opname ATTRIBUTE_UNUSED,
			 enum machine_mode tmode,
			 enum machine_mode fmode)
{
  if (DECIMAL_FLOAT_MODE_P (tmode))
    gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
  else
    gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
}

/* Same as gen_interclass_conv_libfunc but verify that we are producing
   int->fp conversion with no decimal floating point involved.  */

static void
gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
				       const char *opname,
				       enum machine_mode tmode,
				       enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_INT)
    return;
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Same as gen_interclass_conv_libfunc but verify that we are producing
   fp->int conversion.  */

static void
gen_fp_to_int_conv_libfunc (convert_optab tab,
			    const char *opname,
			    enum machine_mode tmode,
			    enum machine_mode fmode)
{
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (GET_MODE_CLASS (tmode) != MODE_INT)
    return;
  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are
   similar to the ones for init_libfuncs, above.  */

static void
gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
			     enum machine_mode tmode, enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *libfunc_name, *suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  if (targetm.libfunc_gnu_prefix)
    {
      nondec_name[2] = 'g';
      nondec_name[3] = 'n';
      nondec_name[4] = 'u';
      nondec_name[5] = '_';
    }
  memcpy (&nondec_name[prefix_len], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + prefix_len;

  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2 + dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  if (DECIMAL_FLOAT_MODE_P (fmode) || DECIMAL_FLOAT_MODE_P (tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p++ = '2';
  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
		    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
/* Pick proper libcall for trunc_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_trunc_conv_libfunc (convert_optab tab,
			const char *opname,
			enum machine_mode tmode,
			enum machine_mode fmode)
{
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for extend_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_extend_conv_libfunc (convert_optab tab,
			 const char *opname ATTRIBUTE_UNUSED,
			 enum machine_mode tmode,
			 enum machine_mode fmode)
{
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for fract_optab.  We need to choose if we do
   interclass or intraclass.  */

static void
gen_fract_conv_libfunc (convert_optab tab,
			const char *opname,
			enum machine_mode tmode,
			enum machine_mode fmode)
{
  if (tmode == fmode)
    return;
  if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
    return;

  if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
  else
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for fractuns_optab.  */

static void
gen_fractuns_conv_libfunc (convert_optab tab,
			   const char *opname,
			   enum machine_mode tmode,
			   enum machine_mode fmode)
{
  if (tmode == fmode)
    return;
  /* One mode must be a fixed-point mode, and the other must be an integer
     mode.  */
  if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
	|| (ALL_FIXED_POINT_MODE_P (fmode)
	    && GET_MODE_CLASS (tmode) == MODE_INT)))
    return;

  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for satfract_optab.  We need to choose if we do
   interclass or intraclass.  */

static void
gen_satfract_conv_libfunc (convert_optab tab,
			   const char *opname,
			   enum machine_mode tmode,
			   enum machine_mode fmode)
{
  if (tmode == fmode)
    return;
  /* TMODE must be a fixed-point mode.  */
  if (!ALL_FIXED_POINT_MODE_P (tmode))
    return;

  if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
  else
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}

/* Pick proper libcall for satfractuns_optab.  */

static void
gen_satfractuns_conv_libfunc (convert_optab tab,
			      const char *opname,
			      enum machine_mode tmode,
			      enum machine_mode fmode)
{
  if (tmode == fmode)
    return;
  /* TMODE must be a fixed-point mode, and FMODE must be an integer mode.  */
  if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
    return;

  gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
/* Hashtable callbacks for libfunc_decls.  */

struct libfunc_decl_hasher : ggc_hasher<tree>
{
  static hashval_t
  hash (tree entry)
  {
    return IDENTIFIER_HASH_VALUE (DECL_NAME (entry));
  }

  static bool
  equal (tree decl, tree name)
  {
    return DECL_NAME (decl) == name;
  }
};

/* A table of previously-created libfuncs, hashed by name.  */
static GTY (()) hash_table<libfunc_decl_hasher> *libfunc_decls;
/* Build a decl for a libfunc named NAME.  */

tree
build_libfunc_function (const char *name)
{
  tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
			  get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo ()".  */
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  gcc_assert (DECL_ASSEMBLER_NAME (decl));

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);

  return decl;
}
rtx
init_one_libfunc (const char *name)
{
  tree id, decl;
  hashval_t hash;

  if (libfunc_decls == NULL)
    libfunc_decls = hash_table<libfunc_decl_hasher>::create_ggc (37);

  /* See if we have already created a libfunc decl for this function.  */
  id = get_identifier (name);
  hash = IDENTIFIER_HASH_VALUE (id);
  tree *slot = libfunc_decls->find_slot_with_hash (id, hash, INSERT);
  decl = *slot;
  if (decl == NULL)
    {
      /* Create a new decl, so that it can be passed to
	 targetm.encode_section_info.  */
      decl = build_libfunc_function (name);
      *slot = decl;
    }
  return XEXP (DECL_RTL (decl), 0);
}
/* Adjust the assembler name of libfunc NAME to ASMSPEC.  */

rtx
set_user_assembler_libfunc (const char *name, const char *asmspec)
{
  tree id, decl;
  hashval_t hash;

  id = get_identifier (name);
  hash = IDENTIFIER_HASH_VALUE (id);
  tree *slot = libfunc_decls->find_slot_with_hash (id, hash, NO_INSERT);
  gcc_assert (slot);
  decl = (tree) *slot;
  set_user_assembler_name (decl, asmspec);
  return XEXP (DECL_RTL (decl), 0);
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab op, enum machine_mode mode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.op = op;
  e.mode1 = mode;
  e.mode2 = VOIDmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = libfunc_hash->find_slot (&e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc<libfunc_entry> ();
  (*slot)->op = op;
  (*slot)->mode1 = mode;
  (*slot)->mode2 = VOIDmode;
  (*slot)->libfunc = val;
}
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optab, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  rtx val;
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.op = optab;
  e.mode1 = tmode;
  e.mode2 = fmode;

  if (name)
    val = init_one_libfunc (name);
  else
    val = 0;
  slot = libfunc_hash->find_slot (&e, INSERT);
  if (*slot == NULL)
    *slot = ggc_alloc<libfunc_entry> ();
  (*slot)->op = optab;
  (*slot)->mode1 = tmode;
  (*slot)->mode2 = fmode;
  (*slot)->libfunc = val;
}
/* Call this to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  if (libfunc_hash)
    libfunc_hash->empty ();
  else
    libfunc_hash = hash_table<libfunc_hasher>::create_ggc (10);

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs (this_fn_optabs);

  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
		       "ffs");

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  if (targetm.libfunc_gnu_prefix)
    {
      set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
    }
  else
    {
      set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
    }

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node),
		       "cabs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Use the current target and options to initialize
   TREE_OPTIMIZATION_OPTABS (OPTNODE).  */

void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}
/* A helper function for init_sync_libfuncs.  Using the basename BASE,
   install libfuncs into TAB for BASE_N for 1 <= N <= MAX.  */

static void
init_sync_libfuncs_1 (optab tab, const char *base, int max)
{
  enum machine_mode mode;
  char buf[64];
  size_t len = strlen (base);
  int i;

  gcc_assert (max <= 8);
  gcc_assert (len + 3 < sizeof (buf));

  memcpy (buf, base, len);
  buf[len] = '_';
  buf[len + 2] = '\0';

  mode = QImode;
  for (i = 1; i <= max; i *= 2)
    {
      buf[len + 1] = '0' + i;
      set_optab_libfunc (tab, mode, buf);
      mode = GET_MODE_2XWIDER_MODE (mode);
    }
}
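
/* For example, a call such as

	init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", 8);

   installs "__sync_fetch_and_add_1" for QImode, "__sync_fetch_and_add_2"
   for HImode, "_4" for SImode and "_8" for DImode, doubling the mode width
   on each iteration of the loop above.  */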
void
init_sync_libfuncs (int max)
{
  if (!flag_sync_libcalls)
    return;

  init_sync_libfuncs_1 (sync_compare_and_swap_optab,
			"__sync_val_compare_and_swap", max);
  init_sync_libfuncs_1 (sync_lock_test_and_set_optab,
			"__sync_lock_test_and_set", max);

  init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
  init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
  init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
  init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
  init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
  init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);

  init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (enum machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (enum machine_mode) j,
					 (enum machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL_RTX;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
		    bool unsignedp, enum insn_code icode)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);

  create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
  create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
/* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
   of the CPU.  SEL may be NULL, which stands for an unknown constant.  */

bool
can_vec_perm_p (enum machine_mode mode, bool variable,
		const unsigned char *sel)
{
  enum machine_mode qimode;

  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  if (!variable)
    {
      if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
	  && (sel == NULL
	      || targetm.vectorize.vec_perm_const_ok == NULL
	      || targetm.vectorize.vec_perm_const_ok (mode, sel)))
	return true;
    }

  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  if (GET_MODE_INNER (mode) == QImode)
    return false;
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
  if (!VECTOR_MODE_P (qimode))
    return false;

  /* ??? For completeness, we ought to check the QImode version of
     vec_perm_const_optab.  But all users of this implicit lowering
     feature implement the variable vec_perm_optab.  */
  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of variable permutations,
     we need to support shifts and adds.  */
  if (variable)
    {
      if (GET_MODE_UNIT_SIZE (mode) > 2
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
	return false;
      if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
	return false;
    }

  return true;
}
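
/* As an illustration of the selector convention used throughout the
   permutation code: for a two-operand permutation of vectors with E
   elements each, SEL holds indices in [0, 2*E); index I < E selects
   element I of the first operand and index I >= E selects element
   I - E of the second.  So for E == 4, the selector { 0, 4, 1, 5 }
   interleaves the low halves of the two inputs.  */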
/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  enum machine_mode tmode = GET_MODE (target);
  enum machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Generate instructions for vec_perm optab given its mode
   and three operands.  */

rtx
expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  enum machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
	      tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      enum machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   sel, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   sel, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
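
/* A worked instance of the byte lowering above, assuming a V4SImode
   permutation (u == 4 bytes per element) on a little-endian target: an
   element index of 2 is first scaled to byte index 8 (2 << log2 (4)),
   broadcast into all four byte positions of its element, and then the
   offsets 0..3 are added, giving the byte selector { 8, 9, 10, 11 }
   for that element.  */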
/* Return insn code for a conditional operator with a comparison in
   mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE.  */

static inline enum insn_code
get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
{
  enum insn_code icode = CODE_FOR_nothing;
  if (uns)
    icode = convert_optab_handler (vcondu_optab, vmode, cmode);
  else
    icode = convert_optab_handler (vcond_optab, vmode, cmode);
  return icode;
}
/* Return TRUE iff appropriate vector insns are available
   for a vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
{
  enum machine_mode value_mode = TYPE_MODE (value_type);
  enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
      || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
      || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
			  TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  enum machine_mode mode = TYPE_MODE (vec_cond_type);
  enum machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      /* Fake op0 < 0.  */
      gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (op0)));
      op0a = op0;
      op0b = build_zero_cst (TREE_TYPE (op0));
      tcode = LT_EXPR;
    }
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    return 0;

  comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* Return non-zero if a highpart multiply is supported or can be synthesized.
   For the benefit of expand_mult_highpart, the return value is 1 for direct,
   2 for even/odd widening, and 3 for hi/lo widening.  */

int
can_mult_highpart_p (enum machine_mode mode, bool uns_p)
{
  optab op;
  unsigned char *sel;
  unsigned i, nunits;

  op = uns_p ? umul_highpart_optab : smul_highpart_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    return 1;

  /* If the mode is an integral vector, synth from widening operations.  */
  if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
    return 0;

  nunits = GET_MODE_NUNITS (mode);
  sel = XALLOCAVEC (unsigned char, nunits);

  op = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  for (i = 0; i < nunits; ++i)
	    sel[i] = !BYTES_BIG_ENDIAN + (i & ~1) + ((i & 1) ? nunits : 0);
	  if (can_vec_perm_p (mode, false, sel))
	    return 2;
	}
    }

  op = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  for (i = 0; i < nunits; ++i)
	    sel[i] = 2 * i + (BYTES_BIG_ENDIAN ? 0 : 1);
	  if (can_vec_perm_p (mode, false, sel))
	    return 3;
	}
    }

  return 0;
}
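
/* Concretely, for V4SImode (nunits == 4) on a little-endian target the
   even/odd recovery selector computed above is { 1, 5, 3, 7 }: viewing
   each widened product vector as V4SI, the odd element of each 64-bit
   product holds its high 32 bits, so the selector picks the highpart
   of every product, alternating between the even-product vector
   (indices 1, 3) and the odd-product vector (indices 5, 7).  */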
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  enum machine_mode wmode;
  rtx m1, m2, perm;
  optab tab1, tab2;
  rtvec v;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	{
	  optab t = tab1;
	  tab1 = tab2;
	  tab2 = t;
	}
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
				    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
/* Return true if target supports vector masked load/store for mode.  */

bool
can_vec_mask_load_store_p (enum machine_mode mode, bool is_load)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  enum machine_mode vmode;
  unsigned int vector_sizes;

  /* If mode is vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return optab_handler (op, mode) != CODE_FOR_nothing;

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  vmode = targetm.vectorize.preferred_simd_mode (mode);
  if (!VECTOR_MODE_P (vmode))
    return false;

  if (optab_handler (op, vmode) != CODE_FOR_nothing)
    return true;

  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
  while (vector_sizes != 0)
    {
      unsigned int cur = 1 << floor_log2 (vector_sizes);
      vector_sizes &= ~cur;
      if (cur <= GET_MODE_SIZE (mode))
	continue;
      vmode = mode_for_vector (mode, cur / GET_MODE_SIZE (mode));
      if (VECTOR_MODE_P (vmode)
	  && optab_handler (op, vmode) != CODE_FOR_nothing)
	return true;
    }
  return false;
}
/* Return true if there is a compare_and_swap pattern.  */

bool
can_compare_and_swap_p (enum machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_compare_and_swap.  */
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Check for __sync_compare_and_swap.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;
  if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
    return true;

  /* No inline compare and swap.  */
  return false;
}
/* Return true if an atomic exchange can be performed.  */

bool
can_atomic_exchange_p (enum machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_exchange.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Don't check __sync_test_and_set, as on some platforms that
     has reduced functionality.  Targets that really do support
     a proper exchange should simply be updated to the __atomics.  */

  return can_compare_and_swap_p (mode, allow_libcall);
}
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label, 0);
  return true;
}
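
/* Viewed from the source level, the loop built here is the usual
   CAS-loop idiom; a sketch in terms of the __atomic builtins:

	old = *mem;
	do
	  new = f (old);                          // SEQ
	while (!__atomic_compare_exchange_n (mem, &old, new, false,
					     __ATOMIC_SEQ_CST,
					     __ATOMIC_RELAXED));

   with the initial plain load done only once, as noted above.  */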
/* This function tries to emit an atomic_exchange instruction.  VAL is
   written to *MEM using memory model MODEL.  The previous contents of
   *MEM are returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST
      || (model & MEMMODEL_MASK) == MEMMODEL_RELEASE
      || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, 2, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  enum machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

#ifndef HAVE_atomic_test_and_set
#define HAVE_atomic_test_and_set 0
#define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
#endif

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  enum machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!HAVE_atomic_test_and_set)
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  gcc_checking_assert
    (insn_data[CODE_FOR_atomic_test_and_set].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[CODE_FOR_atomic_test_and_set].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (CODE_FOR_atomic_test_and_set, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation
   which is generally an atomic exchange.  Some limited targets only allow
   the constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ...  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx,
					     model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  rtx ret;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL to indicate that we do not care about
   that return value.  Both target parameters are updated on success to
   the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  enum machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      enum machine_mode bool_mode = insn_data[icode].operand[0].mode;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					     mode, 3, addr, ptr_mode,
					     expected, mode, desired, mode);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
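
/* When only the old value is available (the __sync pattern without a CC
   set, or the libcall path above), the boolean result is derived
   afterwards as OLDVAL == EXPECTED, i.e. the source-level equivalent of

	success = (target_oval == expected);

   which is what the emit_store_flag_force call at success_bool_from_val
   materializes.  */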
/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

#ifndef HAVE_mem_thread_fence
# define HAVE_mem_thread_fence 0
# define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
#ifndef HAVE_memory_barrier
# define HAVE_memory_barrier 0
# define gen_memory_barrier() (gcc_unreachable (), NULL_RTX)
#endif

void
expand_mem_thread_fence (enum memmodel model)
{
  if (HAVE_mem_thread_fence)
    emit_insn (gen_mem_thread_fence (GEN_INT (model)));
  else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
    {
      if (HAVE_memory_barrier)
	emit_insn (gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
	emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
	expand_asm_memory_barrier ();
    }
}
/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

#ifndef HAVE_mem_signal_fence
# define HAVE_mem_signal_fence 0
# define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
#endif

void
expand_mem_signal_fence (enum memmodel model)
{
  if (HAVE_mem_signal_fence)
    emit_insn (gen_mem_signal_fence (GEN_INT (model)));
  else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
    {
      /* By default targets are coherent between a thread and the signal
	 handler running on the same thread.  Thus this really becomes a
	 compiler barrier, in that stores must not be sunk past
	 (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* Issue val = compare_and_swap (mem, 0, 0).
	 This may cause the occasional harmless store of 0 when the value is
	 already 0, but it seems to be OK according to the standards guys.  */
      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
					  const0_rtx, false, model, model))
	return target;
      else
	/* Otherwise there is no atomic load, leave the library call.  */
	return NULL_RTX;
    }

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
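
/* For example, loading an atomic double-word object (say, a 16-byte
   value on a 64-bit target) that has a compare-and-swap pattern but no
   atomic load pattern takes the compare_and_swap (mem, 0, 0) path
   above: if *mem happens to be 0 the CAS stores 0 back, which is
   harmless, and either way the old value comes back atomically.  */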
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  Try a mem_exchange and throw away
     the result.  If that doesn't work, don't do anything.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
      if (!target)
	target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
							    val);
      if (target)
	return const0_rtx;
      else
	return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
    expand_mem_thread_fence (model);

  return const0_rtx;
}
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
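
/* The two replacements above correspond to the source-level identities

	__atomic_fetch_and (p, 0, m)   ==  __atomic_exchange_n (p, 0, m)
	__atomic_fetch_or (p, -1, m)   ==  __atomic_exchange_n (p, -1, m)

   since ANDing with 0 and ORing with -1 store a value independent of
   the old contents.  */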
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  enum machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  enum machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
	return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
	return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model,
				!after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after == fetch_before OP val.
	     Fetch_before == after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
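
/* As an example of the compensation code: if only atomic_add_fetch is
   available but the caller wants atomic_fetch_add, the value that comes
   back already includes VAL, so the old value is recovered with the
   reverse operation

	fetch_add (mem, val) == add_fetch (mem, val) - val

   which is exactly what the expand_simple_binop call above emits, using
   optab.reverse_code (MINUS for PLUS).  */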
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* The reverse operation worked, so emit the insns and return.  */
	  tmp = get_insns ();
	  end_sequence ();
	  emit_insn (tmp);
	  return result;
	}

      /* It did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    2, addr, ptr_mode, val, mode);

	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  enum machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
	  && !side_effects_p (addr))
	{
	  rtx_insn *last;
	  enum machine_mode mode;

	  last = get_last_insn ();
	  mode = get_address_mode (mem);
	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
	  if (insn_operand_matches (icode, opno, mem))
	    {
	      op->value = mem;
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  enum machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
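
/* Illustrative sketch only, not part of the original file: the
   EXPAND_CONVERT_FROM case above is what create_convert_operand_from
   (from optabs.h) sets up.  A caller holding a value in an arbitrary
   integer mode can let the target's pattern choose the operand mode like
   this.  The function name is hypothetical, and treating the value as
   unsigned is an assumption made for the example.  */

static bool
example_legitimize_converted_input (enum insn_code icode, unsigned int opno,
                                    rtx value, enum machine_mode value_mode)
{
  struct expand_operand op;

  /* Describe VALUE, allowing a conversion from VALUE_MODE to whatever
     mode operand OPNO of ICODE requires.  */
  create_convert_operand_from (&op, value, value_mode, true);
  return maybe_legitimize_operands (icode, opno, 1, &op);
}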
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}
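
/* Illustrative sketch only, not part of the original file: a two-operand
   pattern is usually described with the create_*_operand helpers from
   optabs.h, whose EXPAND_OUTPUT and EXPAND_INPUT types drive the switch
   in maybe_legitimize_operand above.  The function name is hypothetical;
   the icode and rtxes are assumed to come from the caller.  */

static bool
example_legitimize_two_operands (enum insn_code icode, rtx target,
                                 rtx src, enum machine_mode mode)
{
  struct expand_operand ops[2];

  create_output_operand (&ops[0], target, mode);  /* EXPAND_OUTPUT  */
  create_input_operand (&ops[1], src, mode);      /* EXPAND_INPUT  */
  return maybe_legitimize_operands (icode, 0, 2, ops);
}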
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL_RTX;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
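
/* Illustrative sketch only, not part of the original file: putting the
   pieces together.  An expander might look up an optab handler and emit
   it through maybe_expand_insn, returning null on failure so the caller
   can try another strategy.  The function name is hypothetical; the use
   of neg_optab is just an example choice.  */

static rtx
example_expand_neg (rtx target, rtx op0, enum machine_mode mode)
{
  enum insn_code icode = optab_handler (neg_optab, mode);
  struct expand_operand ops[2];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], op0, mode);
  if (!maybe_expand_insn (icode, 2, ops))
    return NULL_RTX;
  /* Legitimization may have replaced TARGET, so return the operand's
     final value rather than the original rtx.  */
  return ops[0].value;
}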
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}
/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#endif
/* Enumerates the possible types of structure operand to an
   extraction_insn.  */
enum extraction_type { ET_unaligned_mem, ET_reg };
/* Check whether insv, extv or extzv pattern ICODE can be used for an
   insertion or extraction of type TYPE on a structure of mode MODE.
   Return true if so and fill in *INSN accordingly.  STRUCT_OP is the
   operand number of the structure (the first sign_extract or zero_extract
   operand) and FIELD_OP is the operand number of the field (the other
   side of the set from the sign_extract or zero_extract).  */

static bool
get_traditional_extraction_insn (extraction_insn *insn,
                                 enum extraction_type type,
                                 enum machine_mode mode,
                                 enum insn_code icode,
                                 int struct_op, int field_op)
{
  const struct insn_data_d *data = &insn_data[icode];

  enum machine_mode struct_mode = data->operand[struct_op].mode;
  if (struct_mode == VOIDmode)
    struct_mode = word_mode;
  if (mode != struct_mode)
    return false;

  enum machine_mode field_mode = data->operand[field_op].mode;
  if (field_mode == VOIDmode)
    field_mode = word_mode;

  enum machine_mode pos_mode = data->operand[struct_op + 2].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = field_mode;
  insn->struct_mode = (type == ET_unaligned_mem ? byte_mode : struct_mode);
  insn->pos_mode = pos_mode;
  return true;
}
/* Return true if an optab exists to perform an insertion or extraction
   of type TYPE in mode MODE.  Describe the instruction in *INSN if so.

   REG_OPTAB is the optab to use for register structures and
   MISALIGN_OPTAB is the optab to use for misaligned memory structures.
   POS_OP is the operand number of the bit position.  */

static bool
get_optab_extraction_insn (struct extraction_insn *insn,
                           enum extraction_type type,
                           enum machine_mode mode, direct_optab reg_optab,
                           direct_optab misalign_optab, int pos_op)
{
  direct_optab optab = (type == ET_unaligned_mem ? misalign_optab : reg_optab);
  enum insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  const struct insn_data_d *data = &insn_data[icode];

  insn->icode = icode;
  insn->field_mode = mode;
  insn->struct_mode = (type == ET_unaligned_mem ? BLKmode : mode);
  insn->pos_mode = data->operand[pos_op].mode;
  if (insn->pos_mode == VOIDmode)
    insn->pos_mode = word_mode;
  return true;
}
/* Return true if an instruction exists to perform an insertion or
   extraction (PATTERN says which) of type TYPE in mode MODE.
   Describe the instruction in *INSN if so.  */

static bool
get_extraction_insn (extraction_insn *insn,
                     enum extraction_pattern pattern,
                     enum extraction_type type,
                     enum machine_mode mode)
{
  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv
          && get_traditional_extraction_insn (insn, type, mode,
                                              CODE_FOR_insv, 0, 3))
        return true;
      return get_optab_extraction_insn (insn, type, mode, insv_optab,
                                        insvmisalign_optab, 2);

    case EP_extv:
      if (HAVE_extv
          && get_traditional_extraction_insn (insn, type, mode,
                                              CODE_FOR_extv, 1, 0))
        return true;
      return get_optab_extraction_insn (insn, type, mode, extv_optab,
                                        extvmisalign_optab, 3);

    case EP_extzv:
      if (HAVE_extzv
          && get_traditional_extraction_insn (insn, type, mode,
                                              CODE_FOR_extzv, 1, 0))
        return true;
      return get_optab_extraction_insn (insn, type, mode, extzv_optab,
                                        extzvmisalign_optab, 3);

    default:
      gcc_unreachable ();
    }
}
/* Return true if an instruction exists to access a field of mode
   FIELDMODE in a structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN and
   TYPE describe the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

static bool
get_best_extraction_insn (extraction_insn *insn,
                          enum extraction_pattern pattern,
                          enum extraction_type type,
                          unsigned HOST_WIDE_INT struct_bits,
                          enum machine_mode field_mode)
{
  enum machine_mode mode = smallest_mode_for_size (struct_bits, MODE_INT);
  while (mode != VOIDmode)
    {
      if (get_extraction_insn (insn, pattern, type, mode))
        {
          while (mode != VOIDmode
                 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (field_mode)
                 && !TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
                                                    field_mode))
            {
              get_extraction_insn (insn, pattern, type, mode);
              mode = GET_MODE_WIDER_MODE (mode);
            }
          return true;
        }
      mode = GET_MODE_WIDER_MODE (mode);
    }
  return false;
}
/* Return true if an instruction exists to access a field of mode
   FIELDMODE in a register structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN describes
   the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

bool
get_best_reg_extraction_insn (extraction_insn *insn,
                              enum extraction_pattern pattern,
                              unsigned HOST_WIDE_INT struct_bits,
                              enum machine_mode field_mode)
{
  return get_best_extraction_insn (insn, pattern, ET_reg, struct_bits,
                                   field_mode);
}
/* Return true if an instruction exists to access a field of BITSIZE
   bits starting BITNUM bits into a memory structure.  Describe the
   "best" such instruction in *INSN if so.  PATTERN describes the type
   of insertion or extraction we want to perform and FIELDMODE is the
   natural mode of the extracted field.

   The instructions considered here only access bytes that overlap
   the bitfield; they do not touch any surrounding bytes.  */

bool
get_best_mem_extraction_insn (extraction_insn *insn,
                              enum extraction_pattern pattern,
                              HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum,
                              enum machine_mode field_mode)
{
  unsigned HOST_WIDE_INT struct_bits = (bitnum % BITS_PER_UNIT
                                        + bitsize
                                        + BITS_PER_UNIT - 1);
  struct_bits -= struct_bits % BITS_PER_UNIT;
  return get_best_extraction_insn (insn, pattern, ET_unaligned_mem,
                                   struct_bits, field_mode);
}
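
/* Illustrative sketch only, not part of the original file: how a
   bitfield expander might query this interface before deciding how to
   read a sign-extended field from memory.  The function name is
   hypothetical; the extraction_insn structure and the EP_extv pattern
   come from optabs.h.  */

static bool
example_have_mem_extv (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum,
                       enum machine_mode field_mode)
{
  extraction_insn insn;

  /* On success, INSN describes the widest usable extv-style access for
     the bytes overlapping the bitfield.  */
  return get_best_mem_extraction_insn (&insn, EP_extv, bitsize, bitnum,
                                       field_mode);
}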
#include "gt-optabs.h"