1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this extract has lines missing (the embedded original line
   numbers jump); only comments were added or closed here — all code tokens
   are unchanged.  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[COI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* NOTE(review): the #endif matching the HAVE_conditional_move #ifdef above
   appears to be missing from this extract — confirm against the original.  */
87 /* Indexed by the machine mode, gives the insn code for vector conditional
   operations.  */
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
/* Forward declarations for the static helpers defined below.
   NOTE(review): several prototypes appear truncated by the extraction
   (e.g. widen_operand's and can_fix_p's parameter lists end abruptly),
   and the #ifndef HAVE_conditional_trap block below has no visible
   matching #endif — confirm against the original file.  */
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
/* NOTE(review): extract has missing lines (return type, braces and some
   statements absent); comment-only changes below — code is unchanged.  */
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
/* Sanity check: the sequence must contain at least two insns.  */
149 gcc_assert (insns
&& INSN_P (insns
) && NEXT_INSN (insns
));
/* Only arithmetic, comparison and unary codes get a REG_EQUAL note.  */
151 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code
) != RTX_COMPARE
155 && GET_RTX_CLASS (code
) != RTX_UNARY
)
158 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Walk to the last insn of the sequence.  */
161 for (last_insn
= insns
;
162 NEXT_INSN (last_insn
) != NULL_RTX
;
163 last_insn
= NEXT_INSN (last_insn
))
166 set
= single_set (last_insn
);
170 if (! rtx_equal_p (SET_DEST (set
), target
)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target
, op0
)
179 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
181 insn
= PREV_INSN (last_insn
);
182 while (insn
!= NULL_RTX
)
184 if (reg_set_p (target
, insn
))
187 insn
= PREV_INSN (insn
);
/* Build the note: unary codes take one operand, all others two.  */
191 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
192 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
194 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
196 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
/* NOTE(review): extract has missing lines; two unterminated block comments
   below were closed (comment-only change) so they no longer swallow code.  */
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
209 int unsignedp
, int no_extend
)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
222 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
223 return convert_modes (mode
, oldmode
, op
, unsignedp
);
225 /* If MODE is no wider than a single word, we return a paradoxical
   SUBREG.  */
227 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
228 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
   part to OP.  */
233 result
= gen_reg_rtx (mode
);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
235 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
/* NOTE(review): extract has missing lines (switch header, many case labels
   and the function's braces are absent); comment-only changes below.  */
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code
, tree type
)
257 return one_cmpl_optab
;
/* Signedness of TYPE selects between the unsigned and signed optabs.  */
266 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
274 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
280 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
289 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
292 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
294 case REALIGN_LOAD_EXPR
:
295 return vec_realign_load_optab
;
298 return TYPE_UNSIGNED (type
) ? reduc_umax_optab
: reduc_smax_optab
;
301 return TYPE_UNSIGNED (type
) ? reduc_umin_optab
: reduc_smin_optab
;
303 case REDUC_PLUS_EXPR
:
304 return TYPE_UNSIGNED (type
) ? reduc_uplus_optab
: reduc_splus_optab
;
306 case VEC_LSHIFT_EXPR
:
307 return vec_shl_optab
;
309 case VEC_RSHIFT_EXPR
:
310 return vec_shr_optab
;
/* For the remaining codes, trapping variants are used when -ftrapv is
   in effect and the type is signed integral.  */
316 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
320 return trapv
? addv_optab
: add_optab
;
323 return trapv
? subv_optab
: sub_optab
;
326 return trapv
? smulv_optab
: smul_optab
;
329 return trapv
? negv_optab
: neg_optab
;
332 return trapv
? absv_optab
: abs_optab
;
/* NOTE(review): extract has missing lines; two unterminated comments below
   were closed (comment-only change) — all code tokens unchanged.  */
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
353 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
355 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
356 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
357 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
358 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
361 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
/* The caller must have verified the pattern exists for MODE.  */
363 gcc_assert (ternary_optab
->handlers
[(int) mode
].insn_code
364 != CODE_FOR_nothing
);
366 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
367 temp
= gen_reg_rtx (mode
);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
   for their mode.  */
377 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
378 xop0
= convert_modes (mode0
,
379 GET_MODE (op0
) != VOIDmode
384 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
385 xop1
= convert_modes (mode1
,
386 GET_MODE (op1
) != VOIDmode
391 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
392 xop2
= convert_modes (mode2
,
393 GET_MODE (op2
) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
   pseudos.  */
401 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
402 && mode0
!= VOIDmode
)
403 xop0
= copy_to_mode_reg (mode0
, xop0
);
405 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
406 && mode1
!= VOIDmode
)
407 xop1
= copy_to_mode_reg (mode1
, xop1
);
409 if (!insn_data
[icode
].operand
[3].predicate (xop2
, mode2
)
410 && mode2
!= VOIDmode
)
411 xop2
= copy_to_mode_reg (mode2
, xop2
);
/* Generate the insn from the pattern's generator function.  */
413 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
/* NOTE(review): extract has missing lines; comment-only changes below.  */
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
425 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
426 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
427 enum optab_methods methods
)
/* Two constant operands: try constant folding first.  */
429 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
431 rtx x
= simplify_binary_operation (binoptab
->code
, mode
, op0
, op1
);
/* Otherwise fall back to emitting real insns.  */
437 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
/* NOTE(review): extract has missing lines; comment-only changes below.  */
440 /* Like simplify_expand_binop, but always put the result in TARGET.
441 Return true if the expansion succeeded. */
444 force_expand_binop (enum machine_mode mode
, optab binoptab
,
445 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
446 enum optab_methods methods
)
448 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
449 target
, unsignedp
, methods
);
/* Copy the result into TARGET if the expansion put it elsewhere.  */
453 emit_move_insn (target
, x
);
/* NOTE(review): extract has missing lines (braces, break statements and the
   default case are absent); comment-only changes below.  */
457 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
460 expand_vec_shift_expr (tree vec_shift_expr
, rtx target
)
462 enum insn_code icode
;
463 rtx rtx_op1
, rtx_op2
;
464 enum machine_mode mode1
;
465 enum machine_mode mode2
;
466 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_shift_expr
));
467 tree vec_oprnd
= TREE_OPERAND (vec_shift_expr
, 0);
468 tree shift_oprnd
= TREE_OPERAND (vec_shift_expr
, 1);
/* Pick the optab for the requested shift direction.  */
472 switch (TREE_CODE (vec_shift_expr
))
474 case VEC_RSHIFT_EXPR
:
475 shift_optab
= vec_shr_optab
;
477 case VEC_LSHIFT_EXPR
:
478 shift_optab
= vec_shl_optab
;
484 icode
= (int) shift_optab
->handlers
[(int) mode
].insn_code
;
485 gcc_assert (icode
!= CODE_FOR_nothing
);
487 mode1
= insn_data
[icode
].operand
[1].mode
;
488 mode2
= insn_data
[icode
].operand
[2].mode
;
/* Expand the vector operand, forcing it into a register if the insn's
   predicate rejects it.  */
490 rtx_op1
= expand_expr (vec_oprnd
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
491 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode1
)
492 && mode1
!= VOIDmode
)
493 rtx_op1
= force_reg (mode1
, rtx_op1
);
/* Likewise for the shift-count operand.  */
495 rtx_op2
= expand_expr (shift_oprnd
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
496 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode2
)
497 && mode2
!= VOIDmode
)
498 rtx_op2
= force_reg (mode2
, rtx_op2
);
501 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
502 target
= gen_reg_rtx (mode
);
504 /* Emit instruction */
505 pat
= GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
);
/* NOTE(review): extract has missing lines; comment-only changes below.  */
512 /* This subroutine of expand_doubleword_shift handles the cases in which
513 the effective shift value is >= BITS_PER_WORD. The arguments and return
514 value are the same as for the parent routine, except that SUPERWORD_OP1
515 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
516 INTO_TARGET may be null if the caller has decided to calculate it. */
519 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
520 rtx outof_target
, rtx into_target
,
521 int unsignedp
, enum optab_methods methods
)
523 if (into_target
!= 0)
524 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
525 into_target
, unsignedp
, methods
))
528 if (outof_target
!= 0)
530 /* For a signed right shift, we must fill OUTOF_TARGET with copies
531 of the sign bit, otherwise we must fill it with zeros. */
532 if (binoptab
!= ashr_optab
)
533 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
535 if (!force_expand_binop (word_mode
, binoptab
,
536 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
537 outof_target
, unsignedp
, methods
))
/* NOTE(review): extract has missing lines (branch bodies and trailing
   arguments of several calls are absent); comment-only changes below.  */
543 /* This subroutine of expand_doubleword_shift handles the cases in which
544 the effective shift value is < BITS_PER_WORD. The arguments and return
545 value are the same as for the parent routine. */
548 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
549 rtx outof_input
, rtx into_input
, rtx op1
,
550 rtx outof_target
, rtx into_target
,
551 int unsignedp
, enum optab_methods methods
,
552 unsigned HOST_WIDE_INT shift_mask
)
554 optab reverse_unsigned_shift
, unsigned_shift
;
/* Shifts in the opposite/same direction as BINOPTAB, always unsigned.  */
557 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
558 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
560 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
561 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
562 the opposite direction to BINOPTAB. */
563 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
565 carries
= outof_input
;
566 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
567 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
572 /* We must avoid shifting by BITS_PER_WORD bits since that is either
573 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
574 has unknown behavior. Do a single shift first, then shift by the
575 remainder. It's OK to use ~OP1 as the remainder if shift counts
576 are truncated to the mode size. */
577 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
578 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
579 if (shift_mask
== BITS_PER_WORD
- 1)
581 tmp
= immed_double_const (-1, -1, op1_mode
);
582 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
587 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
588 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
592 if (tmp
== 0 || carries
== 0)
594 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
595 carries
, tmp
, 0, unsignedp
, methods
);
599 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
600 so the result can go directly into INTO_TARGET if convenient. */
601 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
602 into_target
, unsignedp
, methods
);
606 /* Now OR in the bits carried over from OUTOF_INPUT. */
607 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
608 into_target
, unsignedp
, methods
))
611 /* Use a standard word_mode shift for the out-of half. */
612 if (outof_target
!= 0)
613 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
614 outof_target
, unsignedp
, methods
))
621 #ifdef HAVE_conditional_move
/* NOTE(review): extract has missing lines; one unterminated comment below
   was closed (comment-only change) — all code tokens unchanged.  */
622 /* Try implementing expand_doubleword_shift using conditional moves.
623 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
624 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
625 are the shift counts to use in the former and latter case. All other
626 arguments are the same as the parent routine. */
629 expand_doubleword_shift_condmove (enum machine_mode op1_mode
, optab binoptab
,
630 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
631 rtx outof_input
, rtx into_input
,
632 rtx subword_op1
, rtx superword_op1
,
633 rtx outof_target
, rtx into_target
,
634 int unsignedp
, enum optab_methods methods
,
635 unsigned HOST_WIDE_INT shift_mask
)
637 rtx outof_superword
, into_superword
;
639 /* Put the superword version of the output into OUTOF_SUPERWORD and
   INTO_SUPERWORD.  */
641 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
642 if (outof_target
!= 0 && subword_op1
== superword_op1
)
644 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
645 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
646 into_superword
= outof_target
;
647 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
648 outof_superword
, 0, unsignedp
, methods
))
653 into_superword
= gen_reg_rtx (word_mode
);
654 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
655 outof_superword
, into_superword
,
660 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
661 if (!expand_subword_shift (op1_mode
, binoptab
,
662 outof_input
, into_input
, subword_op1
,
663 outof_target
, into_target
,
664 unsignedp
, methods
, shift_mask
))
667 /* Select between them. Do the INTO half first because INTO_SUPERWORD
668 might be the current value of OUTOF_TARGET. */
669 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
670 into_target
, into_superword
, word_mode
, false))
673 if (outof_target
!= 0)
674 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
675 outof_target
, outof_superword
,
/* NOTE(review): extract has missing lines (else branches, trailing call
   arguments and returns are absent); comment-only changes below.  */
683 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
684 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
685 input operand; the shift moves bits in the direction OUTOF_INPUT->
686 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
687 of the target. OP1 is the shift count and OP1_MODE is its mode.
688 If OP1 is constant, it will have been truncated as appropriate
689 and is known to be nonzero.
691 If SHIFT_MASK is zero, the result of word shifts is undefined when the
692 shift count is outside the range [0, BITS_PER_WORD). This routine must
693 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
695 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
696 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
697 fill with zeros or sign bits as appropriate.
699 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
700 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
701 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
702 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
705 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
706 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
707 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
708 function wants to calculate it itself.
710 Return true if the shift could be successfully synthesized. */
713 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
714 rtx outof_input
, rtx into_input
, rtx op1
,
715 rtx outof_target
, rtx into_target
,
716 int unsignedp
, enum optab_methods methods
,
717 unsigned HOST_WIDE_INT shift_mask
)
719 rtx superword_op1
, tmp
, cmp1
, cmp2
;
720 rtx subword_label
, done_label
;
721 enum rtx_code cmp_code
;
723 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
724 fill the result with sign or zero bits as appropriate. If so, the value
725 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
726 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
727 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
729 This isn't worthwhile for constant shifts since the optimizers will
730 cope better with in-range shift counts. */
731 if (shift_mask
>= BITS_PER_WORD
733 && !CONSTANT_P (op1
))
735 if (!expand_doubleword_shift (op1_mode
, binoptab
,
736 outof_input
, into_input
, op1
,
738 unsignedp
, methods
, shift_mask
))
740 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
741 outof_target
, unsignedp
, methods
))
746 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
747 is true when the effective shift value is less than BITS_PER_WORD.
748 Set SUPERWORD_OP1 to the shift count that should be used to shift
749 OUTOF_INPUT into INTO_TARGET when the condition is false. */
750 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
751 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
753 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
754 is a subword shift count. */
755 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
757 cmp2
= CONST0_RTX (op1_mode
);
763 /* Set CMP1 to OP1 - BITS_PER_WORD. */
764 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
766 cmp2
= CONST0_RTX (op1_mode
);
768 superword_op1
= cmp1
;
773 /* If we can compute the condition at compile time, pick the
774 appropriate subroutine. */
775 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
776 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
778 if (tmp
== const0_rtx
)
779 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
780 outof_target
, into_target
,
783 return expand_subword_shift (op1_mode
, binoptab
,
784 outof_input
, into_input
, op1
,
785 outof_target
, into_target
,
786 unsignedp
, methods
, shift_mask
);
789 #ifdef HAVE_conditional_move
790 /* Try using conditional moves to generate straight-line code. */
792 rtx start
= get_last_insn ();
793 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
794 cmp_code
, cmp1
, cmp2
,
795 outof_input
, into_input
,
797 outof_target
, into_target
,
798 unsignedp
, methods
, shift_mask
))
800 delete_insns_since (start
);
804 /* As a last resort, use branches to select the correct alternative. */
805 subword_label
= gen_label_rtx ();
806 done_label
= gen_label_rtx ();
809 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
810 0, 0, subword_label
);
813 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
814 outof_target
, into_target
,
818 emit_jump_insn (gen_jump (done_label
));
820 emit_label (subword_label
);
822 if (!expand_subword_shift (op1_mode
, binoptab
,
823 outof_input
, into_input
, op1
,
824 outof_target
, into_target
,
825 unsignedp
, methods
, shift_mask
))
828 emit_label (done_label
);
/* NOTE(review): extract has missing lines; the unterminated header comment
   was closed (comment-only change) — all code tokens unchanged.  */
832 /* Subroutine of expand_binop. Perform a double word multiplication of
833 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
834 as the target's word_mode. This function return NULL_RTX if anything
835 goes wrong, in which case it may have already emitted instructions
836 which need to be deleted.
838 If we want to multiply two two-word values and have normal and widening
839 multiplies of single-word values, we can do this with three smaller
840 multiplications. Note that we do not make a REG_NO_CONFLICT block here
841 because we are not operating on one word at a time.
843 The multiplication proceeds as follows:
844 _______________________
845 [__op0_high_|__op0_low__]
846 _______________________
847 * [__op1_high_|__op1_low__]
848 _______________________________________________
849 _______________________
850 (1) [__op0_low__*__op1_low__]
851 _______________________
852 (2a) [__op0_low__*__op1_high_]
853 _______________________
854 (2b) [__op0_high_*__op1_low__]
855 _______________________
856 (3) [__op0_high_*__op1_high_]
859 This gives a 4-word result. Since we are only interested in the
860 lower 2 words, partial result (3) and the upper words of (2a) and
861 (2b) don't need to be calculated. Hence (2a) and (2b) can be
862 calculated using non-widening multiplication.
864 (1), however, needs to be calculated with an unsigned widening
865 multiplication. If this operation is not directly supported we
866 try using a signed widening multiplication and adjust the result.
867 This adjustment works as follows:
869 If both operands are positive then no adjustment is needed.
871 If the operands have different signs, for example op0_low < 0 and
872 op1_low >= 0, the instruction treats the most significant bit of
873 op0_low as a sign bit instead of a bit with significance
874 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
875 with 2**BITS_PER_WORD - op0_low, and two's complements the
876 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
879 Similarly, if both operands are negative, we need to add
880 (op0_low + op1_low) * 2**BITS_PER_WORD.
882 We use a trick to adjust quickly. We logically shift op0_low right
883 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
884 op0_high (op1_high) before it is used to calculate 2b (2a). If no
885 logical shift exists, we do an arithmetic right shift and subtract
   the 1.  */
889 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
890 bool umulp
, enum optab_methods methods
)
892 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
893 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
894 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
895 rtx product
, adjust
, product_high
, temp
;
897 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
898 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
899 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
900 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
902 /* If we're using an unsigned multiply to directly compute the product
903 of the low-order words of the operands and perform any required
904 adjustments of the operands, we begin by trying two more multiplications
905 and then computing the appropriate sum.
907 We have checked above that the required addition is provided.
908 Full-word addition will normally always succeed, especially if
909 it is provided at all, so we don't worry about its failure. The
910 multiplication may well fail, however, so we do handle that. */
914 /* ??? This could be done with emit_store_flag where available. */
915 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
916 NULL_RTX
, 1, methods
);
918 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
919 NULL_RTX
, 0, OPTAB_DIRECT
);
922 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
923 NULL_RTX
, 0, methods
);
926 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
927 NULL_RTX
, 0, OPTAB_DIRECT
);
934 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
935 NULL_RTX
, 0, OPTAB_DIRECT
);
939 /* OP0_HIGH should now be dead. */
943 /* ??? This could be done with emit_store_flag where available. */
944 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
945 NULL_RTX
, 1, methods
);
947 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
948 NULL_RTX
, 0, OPTAB_DIRECT
);
951 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
952 NULL_RTX
, 0, methods
);
955 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
956 NULL_RTX
, 0, OPTAB_DIRECT
);
963 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
964 NULL_RTX
, 0, OPTAB_DIRECT
);
968 /* OP1_HIGH should now be dead. */
970 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
971 adjust
, 0, OPTAB_DIRECT
);
973 if (target
&& !REG_P (target
))
977 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
978 target
, 1, OPTAB_DIRECT
);
980 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
981 target
, 1, OPTAB_DIRECT
);
986 product_high
= operand_subword (product
, high
, 1, mode
);
987 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
988 REG_P (product_high
) ? product_high
: adjust
,
990 emit_move_insn (product_high
, adjust
);
/* NOTE(review): extract has missing lines; comment-only changes below.  */
994 /* Wrapper around expand_binop which takes an rtx code to specify
995 the operation to perform, not an optab pointer. All other
996 arguments are the same. */
998 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
999 rtx op1
, rtx target
, int unsignedp
,
1000 enum optab_methods methods
)
/* Map the rtx code to its optab via the global table.  */
1002 optab binop
= code_to_optab
[(int) code
];
1005 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
1008 /* Generate code to perform an operation specified by BINOPTAB
1009 on operands OP0 and OP1, with result having machine-mode MODE.
1011 UNSIGNEDP is for the case where we have to widen the operands
1012 to perform the operation. It says to use zero-extension.
1014 If TARGET is nonzero, the value
1015 is generated there, if it is convenient to do so.
1016 In all cases an rtx is returned for the locus of the value;
1017 this may or may not be TARGET. */
1020 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1021 rtx target
, int unsignedp
, enum optab_methods methods
)
1023 enum optab_methods next_methods
1024 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1025 ? OPTAB_WIDEN
: methods
);
1026 enum mode_class
class;
1027 enum machine_mode wider_mode
;
1029 int commutative_op
= 0;
1030 int shift_op
= (binoptab
->code
== ASHIFT
1031 || binoptab
->code
== ASHIFTRT
1032 || binoptab
->code
== LSHIFTRT
1033 || binoptab
->code
== ROTATE
1034 || binoptab
->code
== ROTATERT
);
1035 rtx entry_last
= get_last_insn ();
1037 bool first_pass_p
= true;
1039 class = GET_MODE_CLASS (mode
);
1041 /* If subtracting an integer constant, convert this into an addition of
1042 the negated constant. */
1044 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
1046 op1
= negate_rtx (mode
, op1
);
1047 binoptab
= add_optab
;
1050 /* If we are inside an appropriately-short loop and we are optimizing,
1051 force expensive constants into a register. */
1052 if (CONSTANT_P (op0
) && optimize
1053 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1055 if (GET_MODE (op0
) != VOIDmode
)
1056 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
1057 op0
= force_reg (mode
, op0
);
1060 if (CONSTANT_P (op1
) && optimize
1061 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1063 if (GET_MODE (op1
) != VOIDmode
)
1064 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1065 op1
= force_reg (mode
, op1
);
1068 /* Record where to delete back to if we backtrack. */
1069 last
= get_last_insn ();
1071 /* If operation is commutative,
1072 try to make the first operand a register.
1073 Even better, try to make it the same as the target.
1074 Also try to make the last operand a constant. */
1075 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1076 || binoptab
== smul_widen_optab
1077 || binoptab
== umul_widen_optab
1078 || binoptab
== smul_highpart_optab
1079 || binoptab
== umul_highpart_optab
)
1083 if (((target
== 0 || REG_P (target
))
1087 : rtx_equal_p (op1
, target
))
1088 || GET_CODE (op0
) == CONST_INT
)
1098 /* If we can do it with a three-operand insn, do so. */
1100 if (methods
!= OPTAB_MUST_WIDEN
1101 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1103 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1104 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1105 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1107 rtx xop0
= op0
, xop1
= op1
;
1112 temp
= gen_reg_rtx (mode
);
1114 /* If it is a commutative operator and the modes would match
1115 if we would swap the operands, we can save the conversions. */
1118 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1119 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1123 tmp
= op0
; op0
= op1
; op1
= tmp
;
1124 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1128 /* In case the insn wants input operands in modes different from
1129 those of the actual operands, convert the operands. It would
1130 seem that we don't need to convert CONST_INTs, but we do, so
1131 that they're properly zero-extended, sign-extended or truncated
1134 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1135 xop0
= convert_modes (mode0
,
1136 GET_MODE (op0
) != VOIDmode
1141 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1142 xop1
= convert_modes (mode1
,
1143 GET_MODE (op1
) != VOIDmode
1148 /* Now, if insn's predicates don't allow our operands, put them into
1151 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
1152 && mode0
!= VOIDmode
)
1153 xop0
= copy_to_mode_reg (mode0
, xop0
);
1155 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
1156 && mode1
!= VOIDmode
)
1157 xop1
= copy_to_mode_reg (mode1
, xop1
);
1159 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
1160 temp
= gen_reg_rtx (mode
);
1162 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1165 /* If PAT is composed of more than one insn, try to add an appropriate
1166 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1167 operand, call ourselves again, this time without a target. */
1168 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1169 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1171 delete_insns_since (last
);
1172 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1173 unsignedp
, methods
);
1180 delete_insns_since (last
);
1183 /* If we were trying to rotate by a constant value, and that didn't
1184 work, try rotating the other direction before falling back to
1185 shifts and bitwise-or. */
1187 && (binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1188 && class == MODE_INT
1189 && GET_CODE (op1
) == CONST_INT
1191 && (unsigned int) INTVAL (op1
) < GET_MODE_BITSIZE (mode
))
1193 first_pass_p
= false;
1194 op1
= GEN_INT (GET_MODE_BITSIZE (mode
) - INTVAL (op1
));
1195 binoptab
= binoptab
== rotl_optab
? rotr_optab
: rotl_optab
;
1199 /* If this is a multiply, see if we can do a widening operation that
1200 takes operands of this mode and makes a wider mode. */
1202 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
1203 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1204 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1205 != CODE_FOR_nothing
))
1207 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1208 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1209 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1213 if (GET_MODE_CLASS (mode
) == MODE_INT
1214 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1215 GET_MODE_BITSIZE (GET_MODE (temp
))))
1216 return gen_lowpart (mode
, temp
);
1218 return convert_to_mode (mode
, temp
, unsignedp
);
1222 /* Look for a wider mode of the same class for which we think we
1223 can open-code the operation. Check for a widening multiply at the
1224 wider mode as well. */
1226 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1227 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1228 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1229 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1231 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1232 || (binoptab
== smul_optab
1233 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1234 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1235 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1236 != CODE_FOR_nothing
)))
1238 rtx xop0
= op0
, xop1
= op1
;
1241 /* For certain integer operations, we need not actually extend
1242 the narrow operands, as long as we will truncate
1243 the results to the same narrowness. */
1245 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1246 || binoptab
== xor_optab
1247 || binoptab
== add_optab
|| binoptab
== sub_optab
1248 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1249 && class == MODE_INT
)
1252 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1254 /* The second operand of a shift must always be extended. */
1255 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1256 no_extend
&& binoptab
!= ashl_optab
);
1258 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1259 unsignedp
, OPTAB_DIRECT
);
1262 if (class != MODE_INT
1263 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1264 GET_MODE_BITSIZE (wider_mode
)))
1267 target
= gen_reg_rtx (mode
);
1268 convert_move (target
, temp
, 0);
1272 return gen_lowpart (mode
, temp
);
1275 delete_insns_since (last
);
1279 /* These can be done a word at a time. */
1280 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1281 && class == MODE_INT
1282 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1283 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1289 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1290 won't be accurate, so use a new target. */
1291 if (target
== 0 || target
== op0
|| target
== op1
)
1292 target
= gen_reg_rtx (mode
);
1296 /* Do the actual arithmetic. */
1297 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1299 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1300 rtx x
= expand_binop (word_mode
, binoptab
,
1301 operand_subword_force (op0
, i
, mode
),
1302 operand_subword_force (op1
, i
, mode
),
1303 target_piece
, unsignedp
, next_methods
);
1308 if (target_piece
!= x
)
1309 emit_move_insn (target_piece
, x
);
1312 insns
= get_insns ();
1315 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1317 if (binoptab
->code
!= UNKNOWN
)
1319 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1320 copy_rtx (op0
), copy_rtx (op1
));
1324 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1329 /* Synthesize double word shifts from single word shifts. */
1330 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1331 || binoptab
== ashr_optab
)
1332 && class == MODE_INT
1333 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1334 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1335 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1336 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1337 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1339 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1340 enum machine_mode op1_mode
;
1342 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1343 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1344 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1346 /* Apply the truncation to constant shifts. */
1347 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1348 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1350 if (op1
== CONST0_RTX (op1_mode
))
1353 /* Make sure that this is a combination that expand_doubleword_shift
1354 can handle. See the comments there for details. */
1355 if (double_shift_mask
== 0
1356 || (shift_mask
== BITS_PER_WORD
- 1
1357 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1359 rtx insns
, equiv_value
;
1360 rtx into_target
, outof_target
;
1361 rtx into_input
, outof_input
;
1362 int left_shift
, outof_word
;
1364 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1365 won't be accurate, so use a new target. */
1366 if (target
== 0 || target
== op0
|| target
== op1
)
1367 target
= gen_reg_rtx (mode
);
1371 /* OUTOF_* is the word we are shifting bits away from, and
1372 INTO_* is the word that we are shifting bits towards, thus
1373 they differ depending on the direction of the shift and
1374 WORDS_BIG_ENDIAN. */
1376 left_shift
= binoptab
== ashl_optab
;
1377 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1379 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1380 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1382 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1383 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1385 if (expand_doubleword_shift (op1_mode
, binoptab
,
1386 outof_input
, into_input
, op1
,
1387 outof_target
, into_target
,
1388 unsignedp
, next_methods
, shift_mask
))
1390 insns
= get_insns ();
1393 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1394 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1401 /* Synthesize double word rotates from single word shifts. */
1402 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1403 && class == MODE_INT
1404 && GET_CODE (op1
) == CONST_INT
1405 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1406 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1407 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1410 rtx into_target
, outof_target
;
1411 rtx into_input
, outof_input
;
1413 int shift_count
, left_shift
, outof_word
;
1415 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1416 won't be accurate, so use a new target. Do this also if target is not
1417 a REG, first because having a register instead may open optimization
1418 opportunities, and second because if target and op0 happen to be MEMs
1419 designating the same location, we would risk clobbering it too early
1420 in the code sequence we generate below. */
1421 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1422 target
= gen_reg_rtx (mode
);
1426 shift_count
= INTVAL (op1
);
1428 /* OUTOF_* is the word we are shifting bits away from, and
1429 INTO_* is the word that we are shifting bits towards, thus
1430 they differ depending on the direction of the shift and
1431 WORDS_BIG_ENDIAN. */
1433 left_shift
= (binoptab
== rotl_optab
);
1434 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1436 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1437 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1439 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1440 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1442 if (shift_count
== BITS_PER_WORD
)
1444 /* This is just a word swap. */
1445 emit_move_insn (outof_target
, into_input
);
1446 emit_move_insn (into_target
, outof_input
);
1451 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1452 rtx first_shift_count
, second_shift_count
;
1453 optab reverse_unsigned_shift
, unsigned_shift
;
1455 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1456 ? lshr_optab
: ashl_optab
);
1458 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1459 ? ashl_optab
: lshr_optab
);
1461 if (shift_count
> BITS_PER_WORD
)
1463 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1464 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1468 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1469 second_shift_count
= GEN_INT (shift_count
);
1472 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1473 outof_input
, first_shift_count
,
1474 NULL_RTX
, unsignedp
, next_methods
);
1475 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1476 into_input
, second_shift_count
,
1477 NULL_RTX
, unsignedp
, next_methods
);
1479 if (into_temp1
!= 0 && into_temp2
!= 0)
1480 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1481 into_target
, unsignedp
, next_methods
);
1485 if (inter
!= 0 && inter
!= into_target
)
1486 emit_move_insn (into_target
, inter
);
1488 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1489 into_input
, first_shift_count
,
1490 NULL_RTX
, unsignedp
, next_methods
);
1491 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1492 outof_input
, second_shift_count
,
1493 NULL_RTX
, unsignedp
, next_methods
);
1495 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1496 inter
= expand_binop (word_mode
, ior_optab
,
1497 outof_temp1
, outof_temp2
,
1498 outof_target
, unsignedp
, next_methods
);
1500 if (inter
!= 0 && inter
!= outof_target
)
1501 emit_move_insn (outof_target
, inter
);
1504 insns
= get_insns ();
1514 /* These can be done a word at a time by propagating carries. */
1515 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1516 && class == MODE_INT
1517 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1518 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1521 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1522 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1523 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1524 rtx xop0
, xop1
, xtarget
;
1526 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1527 value is one of those, use it. Otherwise, use 1 since it is the
1528 one easiest to get. */
1529 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1530 int normalizep
= STORE_FLAG_VALUE
;
1535 /* Prepare the operands. */
1536 xop0
= force_reg (mode
, op0
);
1537 xop1
= force_reg (mode
, op1
);
1539 xtarget
= gen_reg_rtx (mode
);
1541 if (target
== 0 || !REG_P (target
))
1544 /* Indicate for flow that the entire target reg is being set. */
1546 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1548 /* Do the actual arithmetic. */
1549 for (i
= 0; i
< nwords
; i
++)
1551 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1552 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1553 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1554 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1557 /* Main add/subtract of the input operands. */
1558 x
= expand_binop (word_mode
, binoptab
,
1559 op0_piece
, op1_piece
,
1560 target_piece
, unsignedp
, next_methods
);
1566 /* Store carry from main add/subtract. */
1567 carry_out
= gen_reg_rtx (word_mode
);
1568 carry_out
= emit_store_flag_force (carry_out
,
1569 (binoptab
== add_optab
1572 word_mode
, 1, normalizep
);
1579 /* Add/subtract previous carry to main result. */
1580 newx
= expand_binop (word_mode
,
1581 normalizep
== 1 ? binoptab
: otheroptab
,
1583 NULL_RTX
, 1, next_methods
);
1587 /* Get out carry from adding/subtracting carry in. */
1588 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1589 carry_tmp
= emit_store_flag_force (carry_tmp
,
1590 (binoptab
== add_optab
1593 word_mode
, 1, normalizep
);
1595 /* Logical-ior the two poss. carry together. */
1596 carry_out
= expand_binop (word_mode
, ior_optab
,
1597 carry_out
, carry_tmp
,
1598 carry_out
, 0, next_methods
);
1602 emit_move_insn (target_piece
, newx
);
1606 if (x
!= target_piece
)
1607 emit_move_insn (target_piece
, x
);
1610 carry_in
= carry_out
;
1613 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1615 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1616 || ! rtx_equal_p (target
, xtarget
))
1618 rtx temp
= emit_move_insn (target
, xtarget
);
1620 set_unique_reg_note (temp
,
1622 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1633 delete_insns_since (last
);
1636 /* Attempt to synthesize double word multiplies using a sequence of word
1637 mode multiplications. We first attempt to generate a sequence using a
1638 more efficient unsigned widening multiply, and if that fails we then
1639 try using a signed widening multiply. */
1641 if (binoptab
== smul_optab
1642 && class == MODE_INT
1643 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1644 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1645 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1647 rtx product
= NULL_RTX
;
1649 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1650 != CODE_FOR_nothing
)
1652 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1655 delete_insns_since (last
);
1658 if (product
== NULL_RTX
1659 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1660 != CODE_FOR_nothing
)
1662 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1665 delete_insns_since (last
);
1668 if (product
!= NULL_RTX
)
1670 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1672 temp
= emit_move_insn (target
? target
: product
, product
);
1673 set_unique_reg_note (temp
,
1675 gen_rtx_fmt_ee (MULT
, mode
,
1683 /* It can't be open-coded in this mode.
1684 Use a library call if one is available and caller says that's ok. */
1686 if (binoptab
->handlers
[(int) mode
].libfunc
1687 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1691 enum machine_mode op1_mode
= mode
;
1698 op1_mode
= word_mode
;
1699 /* Specify unsigned here,
1700 since negative shift counts are meaningless. */
1701 op1x
= convert_to_mode (word_mode
, op1
, 1);
1704 if (GET_MODE (op0
) != VOIDmode
1705 && GET_MODE (op0
) != mode
)
1706 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1708 /* Pass 1 for NO_QUEUE so we don't lose any increments
1709 if the libcall is cse'd or moved. */
1710 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1711 NULL_RTX
, LCT_CONST
, mode
, 2,
1712 op0
, mode
, op1x
, op1_mode
);
1714 insns
= get_insns ();
1717 target
= gen_reg_rtx (mode
);
1718 emit_libcall_block (insns
, target
, value
,
1719 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1724 delete_insns_since (last
);
1726 /* It can't be done in this mode. Can we do it in a wider mode? */
1728 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1729 || methods
== OPTAB_MUST_WIDEN
))
1731 /* Caller says, don't even try. */
1732 delete_insns_since (entry_last
);
1736 /* Compute the value of METHODS to pass to recursive calls.
1737 Don't allow widening to be tried recursively. */
1739 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1741 /* Look for a wider mode of the same class for which it appears we can do
1744 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1746 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1747 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1749 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1750 != CODE_FOR_nothing
)
1751 || (methods
== OPTAB_LIB
1752 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1754 rtx xop0
= op0
, xop1
= op1
;
1757 /* For certain integer operations, we need not actually extend
1758 the narrow operands, as long as we will truncate
1759 the results to the same narrowness. */
1761 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1762 || binoptab
== xor_optab
1763 || binoptab
== add_optab
|| binoptab
== sub_optab
1764 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1765 && class == MODE_INT
)
1768 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1769 unsignedp
, no_extend
);
1771 /* The second operand of a shift must always be extended. */
1772 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1773 no_extend
&& binoptab
!= ashl_optab
);
1775 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1776 unsignedp
, methods
);
1779 if (class != MODE_INT
1780 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
1781 GET_MODE_BITSIZE (wider_mode
)))
1784 target
= gen_reg_rtx (mode
);
1785 convert_move (target
, temp
, 0);
1789 return gen_lowpart (mode
, temp
);
1792 delete_insns_since (last
);
1797 delete_insns_since (entry_last
);
1801 /* Expand a binary operator which has both signed and unsigned forms.
1802 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1805 If we widen unsigned operands, we may use a signed wider operation instead
1806 of an unsigned wider operation, since the result would be the same. */
1809 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1810 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1811 enum optab_methods methods
)
1814 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1815 struct optab wide_soptab
;
1817 /* Do it without widening, if possible. */
1818 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1819 unsignedp
, OPTAB_DIRECT
);
1820 if (temp
|| methods
== OPTAB_DIRECT
)
1823 /* Try widening to a signed int. Make a fake signed optab that
1824 hides any signed insn for direct use. */
1825 wide_soptab
= *soptab
;
1826 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1827 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1829 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1830 unsignedp
, OPTAB_WIDEN
);
1832 /* For unsigned operands, try widening to an unsigned int. */
1833 if (temp
== 0 && unsignedp
)
1834 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1835 unsignedp
, OPTAB_WIDEN
);
1836 if (temp
|| methods
== OPTAB_WIDEN
)
1839 /* Use the right width lib call if that exists. */
1840 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1841 if (temp
|| methods
== OPTAB_LIB
)
1844 /* Must widen and use a lib call, use either signed or unsigned. */
1845 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1846 unsignedp
, methods
);
1850 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1851 unsignedp
, methods
);
1855 /* Generate code to perform an operation specified by UNOPPTAB
1856 on operand OP0, with two results to TARG0 and TARG1.
1857 We assume that the order of the operands for the instruction
1858 is TARG0, TARG1, OP0.
1860 Either TARG0 or TARG1 may be zero, but what that means is that
1861 the result is not actually wanted. We will generate it into
1862 a dummy pseudo-reg and discard it. They may not both be zero.
1864 Returns 1 if this operation can be performed; 0 if not. */
1867 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1870 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1871 enum mode_class
class;
1872 enum machine_mode wider_mode
;
1873 rtx entry_last
= get_last_insn ();
1876 class = GET_MODE_CLASS (mode
);
1879 targ0
= gen_reg_rtx (mode
);
1881 targ1
= gen_reg_rtx (mode
);
1883 /* Record where to go back to if we fail. */
1884 last
= get_last_insn ();
1886 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1888 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1889 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1893 if (GET_MODE (xop0
) != VOIDmode
1894 && GET_MODE (xop0
) != mode0
)
1895 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1897 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1898 if (!insn_data
[icode
].operand
[2].predicate (xop0
, mode0
))
1899 xop0
= copy_to_mode_reg (mode0
, xop0
);
1901 /* We could handle this, but we should always be called with a pseudo
1902 for our targets and all insns should take them as outputs. */
1903 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
1904 gcc_assert (insn_data
[icode
].operand
[1].predicate (targ1
, mode
));
1906 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
1913 delete_insns_since (last
);
1916 /* It can't be done in this mode. Can we do it in a wider mode? */
1918 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1920 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1921 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1923 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1924 != CODE_FOR_nothing
)
1926 rtx t0
= gen_reg_rtx (wider_mode
);
1927 rtx t1
= gen_reg_rtx (wider_mode
);
1928 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1930 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1932 convert_move (targ0
, t0
, unsignedp
);
1933 convert_move (targ1
, t1
, unsignedp
);
1937 delete_insns_since (last
);
1942 delete_insns_since (entry_last
);
1946 /* Generate code to perform an operation specified by BINOPTAB
1947 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1948 We assume that the order of the operands for the instruction
1949 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1950 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1952 Either TARG0 or TARG1 may be zero, but what that means is that
1953 the result is not actually wanted. We will generate it into
1954 a dummy pseudo-reg and discard it. They may not both be zero.
1956 Returns 1 if this operation can be performed; 0 if not. */
1959 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
1962 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1963 enum mode_class
class;
1964 enum machine_mode wider_mode
;
1965 rtx entry_last
= get_last_insn ();
1968 class = GET_MODE_CLASS (mode
);
1970 /* If we are inside an appropriately-short loop and we are optimizing,
1971 force expensive constants into a register. */
1972 if (CONSTANT_P (op0
) && optimize
1973 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1974 op0
= force_reg (mode
, op0
);
1976 if (CONSTANT_P (op1
) && optimize
1977 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1978 op1
= force_reg (mode
, op1
);
1981 targ0
= gen_reg_rtx (mode
);
1983 targ1
= gen_reg_rtx (mode
);
1985 /* Record where to go back to if we fail. */
1986 last
= get_last_insn ();
1988 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1990 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1991 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1992 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1994 rtx xop0
= op0
, xop1
= op1
;
1996 /* In case the insn wants input operands in modes different from
1997 those of the actual operands, convert the operands. It would
1998 seem that we don't need to convert CONST_INTs, but we do, so
1999 that they're properly zero-extended, sign-extended or truncated
2002 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2003 xop0
= convert_modes (mode0
,
2004 GET_MODE (op0
) != VOIDmode
2009 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2010 xop1
= convert_modes (mode1
,
2011 GET_MODE (op1
) != VOIDmode
2016 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2017 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2018 xop0
= copy_to_mode_reg (mode0
, xop0
);
2020 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
))
2021 xop1
= copy_to_mode_reg (mode1
, xop1
);
2023 /* We could handle this, but we should always be called with a pseudo
2024 for our targets and all insns should take them as outputs. */
2025 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
2026 gcc_assert (insn_data
[icode
].operand
[3].predicate (targ1
, mode
));
2028 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2035 delete_insns_since (last
);
2038 /* It can't be done in this mode. Can we do it in a wider mode? */
2040 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2042 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2043 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2045 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2046 != CODE_FOR_nothing
)
2048 rtx t0
= gen_reg_rtx (wider_mode
);
2049 rtx t1
= gen_reg_rtx (wider_mode
);
2050 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2051 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2053 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2056 convert_move (targ0
, t0
, unsignedp
);
2057 convert_move (targ1
, t1
, unsignedp
);
2061 delete_insns_since (last
);
2066 delete_insns_since (entry_last
);
2070 /* Expand the two-valued library call indicated by BINOPTAB, but
2071 preserve only one of the values. If TARG0 is non-NULL, the first
2072 value is placed into TARG0; otherwise the second value is placed
2073 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2074 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2075 This routine assumes that the value returned by the library call is
2076 as if the return value was of an integral mode twice as wide as the
2077 mode of OP0. Returns 1 if the call was successful. */
2080 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2081 rtx targ0
, rtx targ1
, enum rtx_code code
)
2083 enum machine_mode mode
;
2084 enum machine_mode libval_mode
;
2088 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2089 gcc_assert (!targ0
!= !targ1
);
2091 mode
= GET_MODE (op0
);
2092 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2095 /* The value returned by the library function will have twice as
2096 many bits as the nominal MODE. */
2097 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2100 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2101 NULL_RTX
, LCT_CONST
,
2105 /* Get the part of VAL containing the value that we want. */
2106 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2107 targ0
? 0 : GET_MODE_SIZE (mode
));
2108 insns
= get_insns ();
2110 /* Move the into the desired location. */
2111 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2112 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2118 /* Wrapper around expand_unop which takes an rtx code to specify
2119 the operation to perform, not an optab pointer. All other
2120 arguments are the same. */
2122 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2123 rtx target
, int unsignedp
)
2125 optab unop
= code_to_optab
[(int) code
];
2128 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2134 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2136 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2138 enum mode_class
class = GET_MODE_CLASS (mode
);
2139 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2141 enum machine_mode wider_mode
;
2142 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2143 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2145 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2146 != CODE_FOR_nothing
)
2148 rtx xop0
, temp
, last
;
2150 last
= get_last_insn ();
2153 target
= gen_reg_rtx (mode
);
2154 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2155 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2157 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2158 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2159 - GET_MODE_BITSIZE (mode
)),
2160 target
, true, OPTAB_DIRECT
);
2162 delete_insns_since (last
);
2171 /* Try calculating (parity x) as (and (popcount x) 1), where
2172 popcount can also be done in a wider mode. */
2174 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2176 enum mode_class
class = GET_MODE_CLASS (mode
);
2177 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2179 enum machine_mode wider_mode
;
2180 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2181 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2183 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2184 != CODE_FOR_nothing
)
2186 rtx xop0
, temp
, last
;
2188 last
= get_last_insn ();
2191 target
= gen_reg_rtx (mode
);
2192 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2193 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2196 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2197 target
, true, OPTAB_DIRECT
);
2199 delete_insns_since (last
);
2208 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2209 conditions, VAL may already be a SUBREG against which we cannot generate
2210 a further SUBREG. In this case, we expect forcing the value into a
2211 register will work around the situation. */
2214 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2215 enum machine_mode imode
)
2218 ret
= lowpart_subreg (omode
, val
, imode
);
2221 val
= force_reg (imode
, val
);
2222 ret
= lowpart_subreg (omode
, val
, imode
);
2223 gcc_assert (ret
!= NULL
);
2228 /* Expand a floating point absolute value or negation operation via a
2229 logical operation on the sign bit. */
2232 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2233 rtx op0
, rtx target
)
2235 const struct real_format
*fmt
;
2236 int bitpos
, word
, nwords
, i
;
2237 enum machine_mode imode
;
2238 HOST_WIDE_INT hi
, lo
;
2241 /* The format has to have a simple sign bit. */
2242 fmt
= REAL_MODE_FORMAT (mode
);
2246 bitpos
= fmt
->signbit_rw
;
2250 /* Don't create negative zeros if the format doesn't support them. */
2251 if (code
== NEG
&& !fmt
->has_signed_zero
)
2254 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2256 imode
= int_mode_for_mode (mode
);
2257 if (imode
== BLKmode
)
2266 if (FLOAT_WORDS_BIG_ENDIAN
)
2267 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2269 word
= bitpos
/ BITS_PER_WORD
;
2270 bitpos
= bitpos
% BITS_PER_WORD
;
2271 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2274 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2277 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2281 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2287 if (target
== 0 || target
== op0
)
2288 target
= gen_reg_rtx (mode
);
2294 for (i
= 0; i
< nwords
; ++i
)
2296 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2297 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2301 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2303 immed_double_const (lo
, hi
, imode
),
2304 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2305 if (temp
!= targ_piece
)
2306 emit_move_insn (targ_piece
, temp
);
2309 emit_move_insn (targ_piece
, op0_piece
);
2312 insns
= get_insns ();
2315 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2316 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2320 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2321 gen_lowpart (imode
, op0
),
2322 immed_double_const (lo
, hi
, imode
),
2323 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2324 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2326 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2327 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2333 /* Generate code to perform an operation specified by UNOPTAB
2334 on operand OP0, with result having machine-mode MODE.
2336 UNSIGNEDP is for the case where we have to widen the operands
2337 to perform the operation. It says to use zero-extension.
2339 If TARGET is nonzero, the value
2340 is generated there, if it is convenient to do so.
2341 In all cases an rtx is returned for the locus of the value;
2342 this may or may not be TARGET. */
2345 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2348 enum mode_class
class;
2349 enum machine_mode wider_mode
;
2351 rtx last
= get_last_insn ();
2354 class = GET_MODE_CLASS (mode
);
2356 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2358 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2359 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2365 temp
= gen_reg_rtx (mode
);
2367 if (GET_MODE (xop0
) != VOIDmode
2368 && GET_MODE (xop0
) != mode0
)
2369 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2371 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2373 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2374 xop0
= copy_to_mode_reg (mode0
, xop0
);
2376 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
2377 temp
= gen_reg_rtx (mode
);
2379 pat
= GEN_FCN (icode
) (temp
, xop0
);
2382 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2383 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2385 delete_insns_since (last
);
2386 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2394 delete_insns_since (last
);
2397 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2399 /* Widening clz needs special treatment. */
2400 if (unoptab
== clz_optab
)
2402 temp
= widen_clz (mode
, op0
, target
);
2409 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2410 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2411 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2413 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2417 /* For certain operations, we need not actually extend
2418 the narrow operand, as long as we will truncate the
2419 results to the same narrowness. */
2421 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2422 (unoptab
== neg_optab
2423 || unoptab
== one_cmpl_optab
)
2424 && class == MODE_INT
);
2426 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2431 if (class != MODE_INT
2432 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2433 GET_MODE_BITSIZE (wider_mode
)))
2436 target
= gen_reg_rtx (mode
);
2437 convert_move (target
, temp
, 0);
2441 return gen_lowpart (mode
, temp
);
2444 delete_insns_since (last
);
2448 /* These can be done a word at a time. */
2449 if (unoptab
== one_cmpl_optab
2450 && class == MODE_INT
2451 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2452 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2457 if (target
== 0 || target
== op0
)
2458 target
= gen_reg_rtx (mode
);
2462 /* Do the actual arithmetic. */
2463 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2465 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2466 rtx x
= expand_unop (word_mode
, unoptab
,
2467 operand_subword_force (op0
, i
, mode
),
2468 target_piece
, unsignedp
);
2470 if (target_piece
!= x
)
2471 emit_move_insn (target_piece
, x
);
2474 insns
= get_insns ();
2477 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2478 gen_rtx_fmt_e (unoptab
->code
, mode
,
2483 if (unoptab
->code
== NEG
)
2485 /* Try negating floating point values by flipping the sign bit. */
2486 if (class == MODE_FLOAT
)
2488 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2493 /* If there is no negation pattern, and we have no negative zero,
2494 try subtracting from zero. */
2495 if (!HONOR_SIGNED_ZEROS (mode
))
2497 temp
= expand_binop (mode
, (unoptab
== negv_optab
2498 ? subv_optab
: sub_optab
),
2499 CONST0_RTX (mode
), op0
, target
,
2500 unsignedp
, OPTAB_DIRECT
);
2506 /* Try calculating parity (x) as popcount (x) % 2. */
2507 if (unoptab
== parity_optab
)
2509 temp
= expand_parity (mode
, op0
, target
);
2515 /* Now try a library call in this mode. */
2516 if (unoptab
->handlers
[(int) mode
].libfunc
)
2520 enum machine_mode outmode
= mode
;
2522 /* All of these functions return small values. Thus we choose to
2523 have them return something that isn't a double-word. */
2524 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2525 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2527 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2531 /* Pass 1 for NO_QUEUE so we don't lose any increments
2532 if the libcall is cse'd or moved. */
2533 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2534 NULL_RTX
, LCT_CONST
, outmode
,
2536 insns
= get_insns ();
2539 target
= gen_reg_rtx (outmode
);
2540 emit_libcall_block (insns
, target
, value
,
2541 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2546 /* It can't be done in this mode. Can we do it in a wider mode? */
2548 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2550 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2551 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2553 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2554 != CODE_FOR_nothing
)
2555 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2559 /* For certain operations, we need not actually extend
2560 the narrow operand, as long as we will truncate the
2561 results to the same narrowness. */
2563 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2564 (unoptab
== neg_optab
2565 || unoptab
== one_cmpl_optab
)
2566 && class == MODE_INT
);
2568 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2571 /* If we are generating clz using wider mode, adjust the
2573 if (unoptab
== clz_optab
&& temp
!= 0)
2574 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2575 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2576 - GET_MODE_BITSIZE (mode
)),
2577 target
, true, OPTAB_DIRECT
);
2581 if (class != MODE_INT
)
2584 target
= gen_reg_rtx (mode
);
2585 convert_move (target
, temp
, 0);
2589 return gen_lowpart (mode
, temp
);
2592 delete_insns_since (last
);
2597 /* One final attempt at implementing negation via subtraction,
2598 this time allowing widening of the operand. */
2599 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2602 temp
= expand_binop (mode
,
2603 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2604 CONST0_RTX (mode
), op0
,
2605 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2613 /* Emit code to compute the absolute value of OP0, with result to
2614 TARGET if convenient. (TARGET may be 0.) The return value says
2615 where the result actually is to be found.
2617 MODE is the mode of the operand; the mode of the result is
2618 different but can be deduced from MODE.
2623 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2624 int result_unsignedp
)
2629 result_unsignedp
= 1;
2631 /* First try to do it with a special abs instruction. */
2632 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2637 /* For floating point modes, try clearing the sign bit. */
2638 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2640 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2645 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2646 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2647 && !HONOR_SIGNED_ZEROS (mode
))
2649 rtx last
= get_last_insn ();
2651 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2653 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2659 delete_insns_since (last
);
2662 /* If this machine has expensive jumps, we can do integer absolute
2663 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2664 where W is the width of MODE. */
2666 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2668 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2669 size_int (GET_MODE_BITSIZE (mode
) - 1),
2672 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2675 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2676 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2686 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2687 int result_unsignedp
, int safe
)
2692 result_unsignedp
= 1;
2694 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2698 /* If that does not win, use conditional jump and negate. */
2700 /* It is safe to use the target if it is the same
2701 as the source if this is also a pseudo register */
2702 if (op0
== target
&& REG_P (op0
)
2703 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2706 op1
= gen_label_rtx ();
2707 if (target
== 0 || ! safe
2708 || GET_MODE (target
) != mode
2709 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2711 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2712 target
= gen_reg_rtx (mode
);
2714 emit_move_insn (target
, op0
);
2717 /* If this mode is an integer too wide to compare properly,
2718 compare word by word. Rely on CSE to optimize constant cases. */
2719 if (GET_MODE_CLASS (mode
) == MODE_INT
2720 && ! can_compare_p (GE
, mode
, ccp_jump
))
2721 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2724 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2725 NULL_RTX
, NULL_RTX
, op1
);
2727 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2730 emit_move_insn (target
, op0
);
2736 /* A subroutine of expand_copysign, perform the copysign operation using the
2737 abs and neg primitives advertised to exist on the target. The assumption
2738 is that we have a split register file, and leaving op0 in fp registers,
2739 and not playing with subregs so much, will help the register allocator. */
2742 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2743 int bitpos
, bool op0_is_abs
)
2745 enum machine_mode imode
;
2746 HOST_WIDE_INT hi
, lo
;
2755 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2762 if (target
== NULL_RTX
)
2763 target
= copy_to_reg (op0
);
2765 emit_move_insn (target
, op0
);
2768 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2770 imode
= int_mode_for_mode (mode
);
2771 if (imode
== BLKmode
)
2773 op1
= gen_lowpart (imode
, op1
);
2778 if (FLOAT_WORDS_BIG_ENDIAN
)
2779 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2781 word
= bitpos
/ BITS_PER_WORD
;
2782 bitpos
= bitpos
% BITS_PER_WORD
;
2783 op1
= operand_subword_force (op1
, word
, mode
);
2786 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2789 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2793 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2797 op1
= expand_binop (imode
, and_optab
, op1
,
2798 immed_double_const (lo
, hi
, imode
),
2799 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2801 label
= gen_label_rtx ();
2802 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2804 if (GET_CODE (op0
) == CONST_DOUBLE
)
2805 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2807 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2809 emit_move_insn (target
, op0
);
2817 /* A subroutine of expand_copysign, perform the entire copysign operation
2818 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2819 is true if op0 is known to have its sign bit clear. */
2822 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2823 int bitpos
, bool op0_is_abs
)
2825 enum machine_mode imode
;
2826 HOST_WIDE_INT hi
, lo
;
2827 int word
, nwords
, i
;
2830 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2832 imode
= int_mode_for_mode (mode
);
2833 if (imode
== BLKmode
)
2842 if (FLOAT_WORDS_BIG_ENDIAN
)
2843 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2845 word
= bitpos
/ BITS_PER_WORD
;
2846 bitpos
= bitpos
% BITS_PER_WORD
;
2847 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2850 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2853 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2857 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2861 if (target
== 0 || target
== op0
|| target
== op1
)
2862 target
= gen_reg_rtx (mode
);
2868 for (i
= 0; i
< nwords
; ++i
)
2870 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2871 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2876 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2877 immed_double_const (~lo
, ~hi
, imode
),
2878 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2880 op1
= expand_binop (imode
, and_optab
,
2881 operand_subword_force (op1
, i
, mode
),
2882 immed_double_const (lo
, hi
, imode
),
2883 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2885 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2886 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2887 if (temp
!= targ_piece
)
2888 emit_move_insn (targ_piece
, temp
);
2891 emit_move_insn (targ_piece
, op0_piece
);
2894 insns
= get_insns ();
2897 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2901 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2902 immed_double_const (lo
, hi
, imode
),
2903 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2905 op0
= gen_lowpart (imode
, op0
);
2907 op0
= expand_binop (imode
, and_optab
, op0
,
2908 immed_double_const (~lo
, ~hi
, imode
),
2909 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2911 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2912 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2913 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2919 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2920 scalar floating point mode. Return NULL if we do not know how to
2921 expand the operation inline. */
2924 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2926 enum machine_mode mode
= GET_MODE (op0
);
2927 const struct real_format
*fmt
;
2931 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2932 gcc_assert (GET_MODE (op1
) == mode
);
2934 /* First try to do it with a special instruction. */
2935 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2936 target
, 0, OPTAB_DIRECT
);
2940 fmt
= REAL_MODE_FORMAT (mode
);
2941 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2945 if (GET_CODE (op0
) == CONST_DOUBLE
)
2947 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2948 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2952 if (fmt
->signbit_ro
>= 0
2953 && (GET_CODE (op0
) == CONST_DOUBLE
2954 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2955 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)))
2957 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2958 fmt
->signbit_ro
, op0_is_abs
);
2963 if (fmt
->signbit_rw
< 0)
2965 return expand_copysign_bit (mode
, op0
, op1
, target
,
2966 fmt
->signbit_rw
, op0_is_abs
);
2969 /* Generate an instruction whose insn-code is INSN_CODE,
2970 with two operands: an output TARGET and an input OP0.
2971 TARGET *must* be nonzero, and the output is always stored there.
2972 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2973 the value that is stored into TARGET. */
2976 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2979 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2984 /* Now, if insn does not accept our operands, put them into pseudos. */
2986 if (!insn_data
[icode
].operand
[1].predicate (op0
, mode0
))
2987 op0
= copy_to_mode_reg (mode0
, op0
);
2989 if (!insn_data
[icode
].operand
[0].predicate (temp
, GET_MODE (temp
)))
2990 temp
= gen_reg_rtx (GET_MODE (temp
));
2992 pat
= GEN_FCN (icode
) (temp
, op0
);
2994 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2995 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3000 emit_move_insn (target
, temp
);
3003 struct no_conflict_data
3005 rtx target
, first
, insn
;
3009 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3010 Set P->must_stay if the currently examined clobber / store has to stay
3011 in the list of insns that constitute the actual no_conflict block /
3014 no_conflict_move_test (rtx dest
, rtx set
, void *p0
)
3016 struct no_conflict_data
*p
= p0
;
3018 /* If this inns directly contributes to setting the target, it must stay. */
3019 if (reg_overlap_mentioned_p (p
->target
, dest
))
3020 p
->must_stay
= true;
3021 /* If we haven't committed to keeping any other insns in the list yet,
3022 there is nothing more to check. */
3023 else if (p
->insn
== p
->first
)
3025 /* If this insn sets / clobbers a register that feeds one of the insns
3026 already in the list, this insn has to stay too. */
3027 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3028 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3029 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3030 /* Likewise if this insn depends on a register set by a previous
3031 insn in the list, or if it sets a result (presumably a hard
3032 register) that is set or clobbered by a previous insn.
3033 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3034 SET_DEST perform the former check on the address, and the latter
3035 check on the MEM. */
3036 || (GET_CODE (set
) == SET
3037 && (modified_in_p (SET_SRC (set
), p
->first
)
3038 || modified_in_p (SET_DEST (set
), p
->first
)
3039 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3040 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3041 p
->must_stay
= true;
3044 /* Emit code to perform a series of operations on a multi-word quantity, one
3047 Such a block is preceded by a CLOBBER of the output, consists of multiple
3048 insns, each setting one word of the output, and followed by a SET copying
3049 the output to itself.
3051 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3052 note indicating that it doesn't conflict with the (also multi-word)
3053 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3056 INSNS is a block of code generated to perform the operation, not including
3057 the CLOBBER and final copy. All insns that compute intermediate values
3058 are first emitted, followed by the block as described above.
3060 TARGET, OP0, and OP1 are the output and inputs of the operations,
3061 respectively. OP1 may be zero for a unary operation.
3063 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3066 If TARGET is not a register, INSNS is simply emitted with no special
3067 processing. Likewise if anything in INSNS is not an INSN or if
3068 there is a libcall block inside INSNS.
3070 The final insn emitted is returned. */
3073 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3075 rtx prev
, next
, first
, last
, insn
;
3077 if (!REG_P (target
) || reload_in_progress
)
3078 return emit_insn (insns
);
3080 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3081 if (!NONJUMP_INSN_P (insn
)
3082 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3083 return emit_insn (insns
);
3085 /* First emit all insns that do not store into words of the output and remove
3086 these from the list. */
3087 for (insn
= insns
; insn
; insn
= next
)
3090 struct no_conflict_data data
;
3092 next
= NEXT_INSN (insn
);
3094 /* Some ports (cris) create a libcall regions at their own. We must
3095 avoid any potential nesting of LIBCALLs. */
3096 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3097 remove_note (insn
, note
);
3098 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3099 remove_note (insn
, note
);
3101 data
.target
= target
;
3105 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3106 if (! data
.must_stay
)
3108 if (PREV_INSN (insn
))
3109 NEXT_INSN (PREV_INSN (insn
)) = next
;
3114 PREV_INSN (next
) = PREV_INSN (insn
);
3120 prev
= get_last_insn ();
3122 /* Now write the CLOBBER of the output, followed by the setting of each
3123 of the words, followed by the final copy. */
3124 if (target
!= op0
&& target
!= op1
)
3125 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3127 for (insn
= insns
; insn
; insn
= next
)
3129 next
= NEXT_INSN (insn
);
3132 if (op1
&& REG_P (op1
))
3133 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3136 if (op0
&& REG_P (op0
))
3137 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3141 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3142 != CODE_FOR_nothing
)
3144 last
= emit_move_insn (target
, target
);
3146 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3150 last
= get_last_insn ();
3152 /* Remove any existing REG_EQUAL note from "last", or else it will
3153 be mistaken for a note referring to the full contents of the
3154 alleged libcall value when found together with the REG_RETVAL
3155 note added below. An existing note can come from an insn
3156 expansion at "last". */
3157 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3161 first
= get_insns ();
3163 first
= NEXT_INSN (prev
);
3165 /* Encapsulate the block so it gets manipulated as a unit. */
3166 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3168 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3173 /* Emit code to make a call to a constant function or a library call.
3175 INSNS is a list containing all insns emitted in the call.
3176 These insns leave the result in RESULT. Our block is to copy RESULT
3177 to TARGET, which is logically equivalent to EQUIV.
3179 We first emit any insns that set a pseudo on the assumption that these are
3180 loading constants into registers; doing so allows them to be safely cse'ed
3181 between blocks. Then we emit all the other insns in the block, followed by
3182 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3183 note with an operand of EQUIV.
3185 Moving assignments to pseudos outside of the block is done to improve
3186 the generated code, but is not required to generate correct code,
3187 hence being unable to move an assignment is not grounds for not making
3188 a libcall block. There are two reasons why it is safe to leave these
3189 insns inside the block: First, we know that these pseudos cannot be
3190 used in generated RTL outside the block since they are created for
3191 temporary purposes within the block. Second, CSE will not record the
3192 values of anything set inside a libcall block, so we know they must
3193 be dead at the end of the block.
3195 Except for the first group of insns (the ones setting pseudos), the
3196 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3199 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3201 rtx final_dest
= target
;
3202 rtx prev
, next
, first
, last
, insn
;
3204 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3205 into a MEM later. Protect the libcall block from this change. */
3206 if (! REG_P (target
) || REG_USERVAR_P (target
))
3207 target
= gen_reg_rtx (GET_MODE (target
));
3209 /* If we're using non-call exceptions, a libcall corresponding to an
3210 operation that may trap may also trap. */
3211 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3213 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3216 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3218 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3219 remove_note (insn
, note
);
3223 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3224 reg note to indicate that this call cannot throw or execute a nonlocal
3225 goto (unless there is already a REG_EH_REGION note, in which case
3227 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3230 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3233 XEXP (note
, 0) = constm1_rtx
;
3235 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3239 /* First emit all insns that set pseudos. Remove them from the list as
3240 we go. Avoid insns that set pseudos which were referenced in previous
3241 insns. These can be generated by move_by_pieces, for example,
3242 to update an address. Similarly, avoid insns that reference things
3243 set in previous insns. */
3245 for (insn
= insns
; insn
; insn
= next
)
3247 rtx set
= single_set (insn
);
3250 /* Some ports (cris) create a libcall regions at their own. We must
3251 avoid any potential nesting of LIBCALLs. */
3252 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3253 remove_note (insn
, note
);
3254 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3255 remove_note (insn
, note
);
3257 next
= NEXT_INSN (insn
);
3259 if (set
!= 0 && REG_P (SET_DEST (set
))
3260 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3262 struct no_conflict_data data
;
3264 data
.target
= const0_rtx
;
3268 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3269 if (! data
.must_stay
)
3271 if (PREV_INSN (insn
))
3272 NEXT_INSN (PREV_INSN (insn
)) = next
;
3277 PREV_INSN (next
) = PREV_INSN (insn
);
3283 /* Some ports use a loop to copy large arguments onto the stack.
3284 Don't move anything outside such a loop. */
3289 prev
= get_last_insn ();
3291 /* Write the remaining insns followed by the final copy. */
3293 for (insn
= insns
; insn
; insn
= next
)
3295 next
= NEXT_INSN (insn
);
3300 last
= emit_move_insn (target
, result
);
3301 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3302 != CODE_FOR_nothing
)
3303 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3306 /* Remove any existing REG_EQUAL note from "last", or else it will
3307 be mistaken for a note referring to the full contents of the
3308 libcall value when found together with the REG_RETVAL note added
3309 below. An existing note can come from an insn expansion at
3311 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3314 if (final_dest
!= target
)
3315 emit_move_insn (final_dest
, target
);
3318 first
= get_insns ();
3320 first
= NEXT_INSN (prev
);
3322 /* Encapsulate the block so it gets manipulated as a unit. */
3323 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3325 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3326 when the encapsulated region would not be in one basic block,
3327 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3329 bool attach_libcall_retval_notes
= true;
3330 next
= NEXT_INSN (last
);
3331 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3332 if (control_flow_insn_p (insn
))
3334 attach_libcall_retval_notes
= false;
3338 if (attach_libcall_retval_notes
)
3340 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3342 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3348 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3349 PURPOSE describes how this comparison will be used. CODE is the rtx
3350 comparison code we will be using.
3352 ??? Actually, CODE is slightly weaker than that. A target is still
3353 required to implement all of the normal bcc operations, but not
3354 required to implement all (or any) of the unordered bcc operations. */
3357 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3358 enum can_compare_purpose purpose
)
3362 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3364 if (purpose
== ccp_jump
)
3365 return bcc_gen_fctn
[(int) code
] != NULL
;
3366 else if (purpose
== ccp_store_flag
)
3367 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3369 /* There's only one cmov entry point, and it's allowed to fail. */
3372 if (purpose
== ccp_jump
3373 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3375 if (purpose
== ccp_cmov
3376 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3378 if (purpose
== ccp_store_flag
3379 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3381 mode
= GET_MODE_WIDER_MODE (mode
);
3383 while (mode
!= VOIDmode
);
3388 /* This function is called when we are going to emit a compare instruction that
3389 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3391 *PMODE is the mode of the inputs (in case they are const_int).
3392 *PUNSIGNEDP nonzero says that the operands are unsigned;
3393 this matters if they need to be widened.
3395 If they have mode BLKmode, then SIZE specifies the size of both operands.
3397 This function performs all the setup necessary so that the caller only has
3398 to emit a single comparison insn. This setup can involve doing a BLKmode
3399 comparison or emitting a library call to perform the comparison if no insn
3400 is available to handle it.
3401 The values which are passed in through pointers can be modified; the caller
3402 should perform the comparison on the modified values. Constant
3403 comparisons must have already been folded. */
3406 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3407 enum machine_mode
*pmode
, int *punsignedp
,
3408 enum can_compare_purpose purpose
)
3410 enum machine_mode mode
= *pmode
;
3411 rtx x
= *px
, y
= *py
;
3412 int unsignedp
= *punsignedp
;
3413 enum mode_class
class;
3415 class = GET_MODE_CLASS (mode
);
3417 /* If we are inside an appropriately-short loop and we are optimizing,
3418 force expensive constants into a register. */
3419 if (CONSTANT_P (x
) && optimize
3420 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3421 x
= force_reg (mode
, x
);
3423 if (CONSTANT_P (y
) && optimize
3424 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3425 y
= force_reg (mode
, y
);
3428 /* Make sure if we have a canonical comparison. The RTL
3429 documentation states that canonical comparisons are required only
3430 for targets which have cc0. */
3431 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3434 /* Don't let both operands fail to indicate the mode. */
3435 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3436 x
= force_reg (mode
, x
);
3438 /* Handle all BLKmode compares. */
3440 if (mode
== BLKmode
)
3442 enum machine_mode cmp_mode
, result_mode
;
3443 enum insn_code cmp_code
;
3448 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3452 /* Try to use a memory block compare insn - either cmpstr
3453 or cmpmem will do. */
3454 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3455 cmp_mode
!= VOIDmode
;
3456 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3458 cmp_code
= cmpmem_optab
[cmp_mode
];
3459 if (cmp_code
== CODE_FOR_nothing
)
3460 cmp_code
= cmpstr_optab
[cmp_mode
];
3461 if (cmp_code
== CODE_FOR_nothing
)
3462 cmp_code
= cmpstrn_optab
[cmp_mode
];
3463 if (cmp_code
== CODE_FOR_nothing
)
3466 /* Must make sure the size fits the insn's mode. */
3467 if ((GET_CODE (size
) == CONST_INT
3468 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3469 || (GET_MODE_BITSIZE (GET_MODE (size
))
3470 > GET_MODE_BITSIZE (cmp_mode
)))
3473 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3474 result
= gen_reg_rtx (result_mode
);
3475 size
= convert_to_mode (cmp_mode
, size
, 1);
3476 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3480 *pmode
= result_mode
;
3484 /* Otherwise call a library function, memcmp. */
3485 libfunc
= memcmp_libfunc
;
3486 length_type
= sizetype
;
3487 result_mode
= TYPE_MODE (integer_type_node
);
3488 cmp_mode
= TYPE_MODE (length_type
);
3489 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3490 TYPE_UNSIGNED (length_type
));
3492 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3499 *pmode
= result_mode
;
3503 /* Don't allow operands to the compare to trap, as that can put the
3504 compare and branch in different basic blocks. */
3505 if (flag_non_call_exceptions
)
3508 x
= force_reg (mode
, x
);
3510 y
= force_reg (mode
, y
);
3515 if (can_compare_p (*pcomparison
, mode
, purpose
))
3518 /* Handle a lib call just for the mode we are using. */
3520 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3522 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3525 /* If we want unsigned, and this mode has a distinct unsigned
3526 comparison routine, use that. */
3527 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3528 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3530 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3531 word_mode
, 2, x
, mode
, y
, mode
);
3533 /* There are two kinds of comparison routines. Biased routines
3534 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3535 of gcc expect that the comparison operation is equivalent
3536 to the modified comparison. For signed comparisons compare the
3537 result against 1 in the biased case, and zero in the unbiased
3538 case. For unsigned comparisons always compare against 1 after
3539 biasing the unbiased result by adding 1. This gives us a way to
3545 if (!TARGET_LIB_INT_CMP_BIASED
)
3548 *px
= plus_constant (result
, 1);
3555 gcc_assert (class == MODE_FLOAT
);
3556 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3559 /* Before emitting an insn with code ICODE, make sure that X, which is going
3560 to be used for operand OPNUM of the insn, is converted from mode MODE to
3561 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3562 that it is accepted by the operand predicate. Return the new value. */
3565 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3566 enum machine_mode wider_mode
, int unsignedp
)
3568 if (mode
!= wider_mode
)
3569 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3571 if (!insn_data
[icode
].operand
[opnum
].predicate
3572 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3576 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3582 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3583 we can do the comparison.
3584 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3585 be NULL_RTX which indicates that only a comparison is to be generated. */
3588 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3589 enum rtx_code comparison
, int unsignedp
, rtx label
)
3591 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3592 enum mode_class
class = GET_MODE_CLASS (mode
);
3593 enum machine_mode wider_mode
= mode
;
3595 /* Try combined insns first. */
3598 enum insn_code icode
;
3599 PUT_MODE (test
, wider_mode
);
3603 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3605 if (icode
!= CODE_FOR_nothing
3606 && insn_data
[icode
].operand
[0].predicate (test
, wider_mode
))
3608 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3609 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3610 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3615 /* Handle some compares against zero. */
3616 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3617 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3619 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3620 emit_insn (GEN_FCN (icode
) (x
));
3622 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3626 /* Handle compares for which there is a directly suitable insn. */
3628 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3629 if (icode
!= CODE_FOR_nothing
)
3631 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3632 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3633 emit_insn (GEN_FCN (icode
) (x
, y
));
3635 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3639 if (class != MODE_INT
&& class != MODE_FLOAT
3640 && class != MODE_COMPLEX_FLOAT
)
3643 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3645 while (wider_mode
!= VOIDmode
);
3650 /* Generate code to compare X with Y so that the condition codes are
3651 set and to jump to LABEL if the condition is true. If X is a
3652 constant and Y is not a constant, then the comparison is swapped to
3653 ensure that the comparison RTL has the canonical form.
3655 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3656 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3657 the proper branch condition code.
3659 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3661 MODE is the mode of the inputs (in case they are const_int).
3663 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3664 be passed unchanged to emit_cmp_insn, then potentially converted into an
3665 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3668 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3669 enum machine_mode mode
, int unsignedp
, rtx label
)
3671 rtx op0
= x
, op1
= y
;
3673 /* Swap operands and condition to ensure canonical RTL. */
3674 if (swap_commutative_operands_p (x
, y
))
3676 /* If we're not emitting a branch, this means some caller
3681 comparison
= swap_condition (comparison
);
3685 /* If OP0 is still a constant, then both X and Y must be constants.
3686 Force X into a register to create canonical RTL. */
3687 if (CONSTANT_P (op0
))
3688 op0
= force_reg (mode
, op0
);
3692 comparison
= unsigned_condition (comparison
);
3694 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3696 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3699 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3702 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3703 enum machine_mode mode
, int unsignedp
)
3705 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3708 /* Emit a library call comparison between floating point X and Y.
3709 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3712 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3713 enum machine_mode
*pmode
, int *punsignedp
)
3715 enum rtx_code comparison
= *pcomparison
;
3716 enum rtx_code swapped
= swap_condition (comparison
);
3717 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3720 enum machine_mode orig_mode
= GET_MODE (x
);
3721 enum machine_mode mode
;
3722 rtx value
, target
, insns
, equiv
;
3724 bool reversed_p
= false;
3726 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3728 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3731 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3734 tmp
= x
; x
= y
; y
= tmp
;
3735 comparison
= swapped
;
3739 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3740 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3742 comparison
= reversed
;
3748 gcc_assert (mode
!= VOIDmode
);
3750 if (mode
!= orig_mode
)
3752 x
= convert_to_mode (mode
, x
, 0);
3753 y
= convert_to_mode (mode
, y
, 0);
3756 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3757 the RTL. The allows the RTL optimizers to delete the libcall if the
3758 condition can be determined at compile-time. */
3759 if (comparison
== UNORDERED
)
3761 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3762 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3763 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3764 temp
, const_true_rtx
, equiv
);
3768 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3769 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3771 rtx true_rtx
, false_rtx
;
3776 true_rtx
= const0_rtx
;
3777 false_rtx
= const_true_rtx
;
3781 true_rtx
= const_true_rtx
;
3782 false_rtx
= const0_rtx
;
3786 true_rtx
= const1_rtx
;
3787 false_rtx
= const0_rtx
;
3791 true_rtx
= const0_rtx
;
3792 false_rtx
= constm1_rtx
;
3796 true_rtx
= constm1_rtx
;
3797 false_rtx
= const0_rtx
;
3801 true_rtx
= const0_rtx
;
3802 false_rtx
= const1_rtx
;
3808 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3809 equiv
, true_rtx
, false_rtx
);
3814 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3815 word_mode
, 2, x
, mode
, y
, mode
);
3816 insns
= get_insns ();
3819 target
= gen_reg_rtx (word_mode
);
3820 emit_libcall_block (insns
, target
, value
, equiv
);
3822 if (comparison
== UNORDERED
3823 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3824 comparison
= reversed_p
? EQ
: NE
;
3829 *pcomparison
= comparison
;
3833 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3836 emit_indirect_jump (rtx loc
)
3838 if (!insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
3840 loc
= copy_to_mode_reg (Pmode
, loc
);
3842 emit_jump_insn (gen_indirect_jump (loc
));
3846 #ifdef HAVE_conditional_move
3848 /* Emit a conditional move instruction if the machine supports one for that
3849 condition and machine mode.
3851 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3852 the mode to use should they be constants. If it is VOIDmode, they cannot
3855 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3856 should be stored there. MODE is the mode to use should they be constants.
3857 If it is VOIDmode, they cannot both be constants.
3859 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3860 is not supported. */
3863 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3864 enum machine_mode cmode
, rtx op2
, rtx op3
,
3865 enum machine_mode mode
, int unsignedp
)
3867 rtx tem
, subtarget
, comparison
, insn
;
3868 enum insn_code icode
;
3869 enum rtx_code reversed
;
3871 /* If one operand is constant, make it the second one. Only do this
3872 if the other operand is not constant as well. */
3874 if (swap_commutative_operands_p (op0
, op1
))
3879 code
= swap_condition (code
);
3882 /* get_condition will prefer to generate LT and GT even if the old
3883 comparison was against zero, so undo that canonicalization here since
3884 comparisons against zero are cheaper. */
3885 if (code
== LT
&& op1
== const1_rtx
)
3886 code
= LE
, op1
= const0_rtx
;
3887 else if (code
== GT
&& op1
== constm1_rtx
)
3888 code
= GE
, op1
= const0_rtx
;
3890 if (cmode
== VOIDmode
)
3891 cmode
= GET_MODE (op0
);
3893 if (swap_commutative_operands_p (op2
, op3
)
3894 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3903 if (mode
== VOIDmode
)
3904 mode
= GET_MODE (op2
);
3906 icode
= movcc_gen_code
[mode
];
3908 if (icode
== CODE_FOR_nothing
)
3912 target
= gen_reg_rtx (mode
);
3916 /* If the insn doesn't accept these operands, put them in pseudos. */
3918 if (!insn_data
[icode
].operand
[0].predicate
3919 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3920 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3922 if (!insn_data
[icode
].operand
[2].predicate
3923 (op2
, insn_data
[icode
].operand
[2].mode
))
3924 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3926 if (!insn_data
[icode
].operand
[3].predicate
3927 (op3
, insn_data
[icode
].operand
[3].mode
))
3928 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3930 /* Everything should now be in the suitable form, so emit the compare insn
3931 and then the conditional move. */
3934 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3936 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3937 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3938 return NULL and let the caller figure out how best to deal with this
3940 if (GET_CODE (comparison
) != code
)
3943 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3945 /* If that failed, then give up. */
3951 if (subtarget
!= target
)
3952 convert_move (target
, subtarget
, 0);
3957 /* Return nonzero if a conditional move of mode MODE is supported.
3959 This function is for combine so it can tell whether an insn that looks
3960 like a conditional move is actually supported by the hardware. If we
3961 guess wrong we lose a bit on optimization, but that's it. */
3962 /* ??? sparc64 supports conditionally moving integers values based on fp
3963 comparisons, and vice versa. How do we handle them? */
3966 can_conditionally_move_p (enum machine_mode mode
)
3968 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3974 #endif /* HAVE_conditional_move */
3976 /* Emit a conditional addition instruction if the machine supports one for that
3977 condition and machine mode.
3979 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3980 the mode to use should they be constants. If it is VOIDmode, they cannot
3983 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3984 should be stored there. MODE is the mode to use should they be constants.
3985 If it is VOIDmode, they cannot both be constants.
3987 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3988 is not supported. */
3991 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3992 enum machine_mode cmode
, rtx op2
, rtx op3
,
3993 enum machine_mode mode
, int unsignedp
)
3995 rtx tem
, subtarget
, comparison
, insn
;
3996 enum insn_code icode
;
3997 enum rtx_code reversed
;
3999 /* If one operand is constant, make it the second one. Only do this
4000 if the other operand is not constant as well. */
4002 if (swap_commutative_operands_p (op0
, op1
))
4007 code
= swap_condition (code
);
4010 /* get_condition will prefer to generate LT and GT even if the old
4011 comparison was against zero, so undo that canonicalization here since
4012 comparisons against zero are cheaper. */
4013 if (code
== LT
&& op1
== const1_rtx
)
4014 code
= LE
, op1
= const0_rtx
;
4015 else if (code
== GT
&& op1
== constm1_rtx
)
4016 code
= GE
, op1
= const0_rtx
;
4018 if (cmode
== VOIDmode
)
4019 cmode
= GET_MODE (op0
);
4021 if (swap_commutative_operands_p (op2
, op3
)
4022 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4031 if (mode
== VOIDmode
)
4032 mode
= GET_MODE (op2
);
4034 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4036 if (icode
== CODE_FOR_nothing
)
4040 target
= gen_reg_rtx (mode
);
4042 /* If the insn doesn't accept these operands, put them in pseudos. */
4044 if (!insn_data
[icode
].operand
[0].predicate
4045 (target
, insn_data
[icode
].operand
[0].mode
))
4046 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4050 if (!insn_data
[icode
].operand
[2].predicate
4051 (op2
, insn_data
[icode
].operand
[2].mode
))
4052 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4054 if (!insn_data
[icode
].operand
[3].predicate
4055 (op3
, insn_data
[icode
].operand
[3].mode
))
4056 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4058 /* Everything should now be in the suitable form, so emit the compare insn
4059 and then the conditional move. */
4062 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4064 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4065 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4066 return NULL and let the caller figure out how best to deal with this
4068 if (GET_CODE (comparison
) != code
)
4071 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4073 /* If that failed, then give up. */
4079 if (subtarget
!= target
)
4080 convert_move (target
, subtarget
, 0);
4085 /* These functions attempt to generate an insn body, rather than
4086 emitting the insn, but if the gen function already emits them, we
4087 make no attempt to turn them back into naked patterns. */
4089 /* Generate and return an insn body to add Y to X. */
4092 gen_add2_insn (rtx x
, rtx y
)
4094 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4096 gcc_assert (insn_data
[icode
].operand
[0].predicate
4097 (x
, insn_data
[icode
].operand
[0].mode
));
4098 gcc_assert (insn_data
[icode
].operand
[1].predicate
4099 (x
, insn_data
[icode
].operand
[1].mode
));
4100 gcc_assert (insn_data
[icode
].operand
[2].predicate
4101 (y
, insn_data
[icode
].operand
[2].mode
));
4103 return GEN_FCN (icode
) (x
, x
, y
);
4106 /* Generate and return an insn body to add r1 and c,
4107 storing the result in r0. */
4109 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4111 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4113 if (icode
== CODE_FOR_nothing
4114 || !(insn_data
[icode
].operand
[0].predicate
4115 (r0
, insn_data
[icode
].operand
[0].mode
))
4116 || !(insn_data
[icode
].operand
[1].predicate
4117 (r1
, insn_data
[icode
].operand
[1].mode
))
4118 || !(insn_data
[icode
].operand
[2].predicate
4119 (c
, insn_data
[icode
].operand
[2].mode
)))
4122 return GEN_FCN (icode
) (r0
, r1
, c
);
4126 have_add2_insn (rtx x
, rtx y
)
4130 gcc_assert (GET_MODE (x
) != VOIDmode
);
4132 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4134 if (icode
== CODE_FOR_nothing
)
4137 if (!(insn_data
[icode
].operand
[0].predicate
4138 (x
, insn_data
[icode
].operand
[0].mode
))
4139 || !(insn_data
[icode
].operand
[1].predicate
4140 (x
, insn_data
[icode
].operand
[1].mode
))
4141 || !(insn_data
[icode
].operand
[2].predicate
4142 (y
, insn_data
[icode
].operand
[2].mode
)))
4148 /* Generate and return an insn body to subtract Y from X. */
4151 gen_sub2_insn (rtx x
, rtx y
)
4153 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4155 gcc_assert (insn_data
[icode
].operand
[0].predicate
4156 (x
, insn_data
[icode
].operand
[0].mode
));
4157 gcc_assert (insn_data
[icode
].operand
[1].predicate
4158 (x
, insn_data
[icode
].operand
[1].mode
));
4159 gcc_assert (insn_data
[icode
].operand
[2].predicate
4160 (y
, insn_data
[icode
].operand
[2].mode
));
4162 return GEN_FCN (icode
) (x
, x
, y
);
4165 /* Generate and return an insn body to subtract r1 and c,
4166 storing the result in r0. */
4168 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4170 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4172 if (icode
== CODE_FOR_nothing
4173 || !(insn_data
[icode
].operand
[0].predicate
4174 (r0
, insn_data
[icode
].operand
[0].mode
))
4175 || !(insn_data
[icode
].operand
[1].predicate
4176 (r1
, insn_data
[icode
].operand
[1].mode
))
4177 || !(insn_data
[icode
].operand
[2].predicate
4178 (c
, insn_data
[icode
].operand
[2].mode
)))
4181 return GEN_FCN (icode
) (r0
, r1
, c
);
4185 have_sub2_insn (rtx x
, rtx y
)
4189 gcc_assert (GET_MODE (x
) != VOIDmode
);
4191 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4193 if (icode
== CODE_FOR_nothing
)
4196 if (!(insn_data
[icode
].operand
[0].predicate
4197 (x
, insn_data
[icode
].operand
[0].mode
))
4198 || !(insn_data
[icode
].operand
[1].predicate
4199 (x
, insn_data
[icode
].operand
[1].mode
))
4200 || !(insn_data
[icode
].operand
[2].predicate
4201 (y
, insn_data
[icode
].operand
[2].mode
)))
4207 /* Generate the body of an instruction to copy Y into X.
4208 It may be a list of insns, if one insn isn't enough. */
4211 gen_move_insn (rtx x
, rtx y
)
4216 emit_move_insn_1 (x
, y
);
4222 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4223 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4224 no such operation exists, CODE_FOR_nothing will be returned. */
4227 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4231 #ifdef HAVE_ptr_extend
4233 return CODE_FOR_ptr_extend
;
4236 tab
= unsignedp
? zext_optab
: sext_optab
;
4237 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4240 /* Generate the body of an insn to extend Y (with mode MFROM)
4241 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4244 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4245 enum machine_mode mfrom
, int unsignedp
)
4247 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4248 return GEN_FCN (icode
) (x
, y
);
4251 /* can_fix_p and can_float_p say whether the target machine
4252 can directly convert a given fixed point type to
4253 a given floating point type, or vice versa.
4254 The returned value is the CODE_FOR_... value to use,
4255 or CODE_FOR_nothing if these modes cannot be directly converted.
4257 *TRUNCP_PTR is set to 1 if it is necessary to output
4258 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4260 static enum insn_code
4261 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4262 int unsignedp
, int *truncp_ptr
)
4265 enum insn_code icode
;
4267 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4268 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4269 if (icode
!= CODE_FOR_nothing
)
4275 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4276 for this to work. We need to rework the fix* and ftrunc* patterns
4277 and documentation. */
4278 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4279 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4280 if (icode
!= CODE_FOR_nothing
4281 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4288 return CODE_FOR_nothing
;
4291 static enum insn_code
4292 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4297 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4298 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4301 /* Generate code to convert FROM to floating point
4302 and store in TO. FROM must be fixed point and not VOIDmode.
4303 UNSIGNEDP nonzero means regard FROM as unsigned.
4304 Normally this is done by correcting the final value
4305 if it is negative. */
4308 expand_float (rtx to
, rtx from
, int unsignedp
)
4310 enum insn_code icode
;
4312 enum machine_mode fmode
, imode
;
4314 /* Crash now, because we won't be able to decide which mode to use. */
4315 gcc_assert (GET_MODE (from
) != VOIDmode
);
4317 /* Look for an insn to do the conversion. Do it in the specified
4318 modes if possible; otherwise convert either input, output or both to
4319 wider mode. If the integer mode is wider than the mode of FROM,
4320 we can do the conversion signed even if the input is unsigned. */
4322 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4323 fmode
= GET_MODE_WIDER_MODE (fmode
))
4324 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4325 imode
= GET_MODE_WIDER_MODE (imode
))
4327 int doing_unsigned
= unsignedp
;
4329 if (fmode
!= GET_MODE (to
)
4330 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4333 icode
= can_float_p (fmode
, imode
, unsignedp
);
4334 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4335 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4337 if (icode
!= CODE_FOR_nothing
)
4339 if (imode
!= GET_MODE (from
))
4340 from
= convert_to_mode (imode
, from
, unsignedp
);
4342 if (fmode
!= GET_MODE (to
))
4343 target
= gen_reg_rtx (fmode
);
4345 emit_unop_insn (icode
, target
, from
,
4346 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4349 convert_move (to
, target
, 0);
4354 /* Unsigned integer, and no way to convert directly.
4355 Convert as signed, then conditionally adjust the result. */
4358 rtx label
= gen_label_rtx ();
4360 REAL_VALUE_TYPE offset
;
4362 /* Look for a usable floating mode FMODE wider than the source and at
4363 least as wide as the target. Using FMODE will avoid rounding woes
4364 with unsigned values greater than the signed maximum value. */
4366 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4367 fmode
= GET_MODE_WIDER_MODE (fmode
))
4368 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4369 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4372 if (fmode
== VOIDmode
)
4374 /* There is no such mode. Pretend the target is wide enough. */
4375 fmode
= GET_MODE (to
);
4377 /* Avoid double-rounding when TO is narrower than FROM. */
4378 if ((significand_size (fmode
) + 1)
4379 < GET_MODE_BITSIZE (GET_MODE (from
)))
4382 rtx neglabel
= gen_label_rtx ();
4384 /* Don't use TARGET if it isn't a register, is a hard register,
4385 or is the wrong mode. */
4387 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4388 || GET_MODE (target
) != fmode
)
4389 target
= gen_reg_rtx (fmode
);
4391 imode
= GET_MODE (from
);
4392 do_pending_stack_adjust ();
4394 /* Test whether the sign bit is set. */
4395 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4398 /* The sign bit is not set. Convert as signed. */
4399 expand_float (target
, from
, 0);
4400 emit_jump_insn (gen_jump (label
));
4403 /* The sign bit is set.
4404 Convert to a usable (positive signed) value by shifting right
4405 one bit, while remembering if a nonzero bit was shifted
4406 out; i.e., compute (from & 1) | (from >> 1). */
4408 emit_label (neglabel
);
4409 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4410 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4411 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4413 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4415 expand_float (target
, temp
, 0);
4417 /* Multiply by 2 to undo the shift above. */
4418 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4419 target
, 0, OPTAB_LIB_WIDEN
);
4421 emit_move_insn (target
, temp
);
4423 do_pending_stack_adjust ();
4429 /* If we are about to do some arithmetic to correct for an
4430 unsigned operand, do it in a pseudo-register. */
4432 if (GET_MODE (to
) != fmode
4433 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4434 target
= gen_reg_rtx (fmode
);
4436 /* Convert as signed integer to floating. */
4437 expand_float (target
, from
, 0);
4439 /* If FROM is negative (and therefore TO is negative),
4440 correct its value by 2**bitwidth. */
4442 do_pending_stack_adjust ();
4443 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4447 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4448 temp
= expand_binop (fmode
, add_optab
, target
,
4449 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4450 target
, 0, OPTAB_LIB_WIDEN
);
4452 emit_move_insn (target
, temp
);
4454 do_pending_stack_adjust ();
4459 /* No hardware instruction available; call a library routine. */
4464 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4466 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4467 from
= convert_to_mode (SImode
, from
, unsignedp
);
4469 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4470 gcc_assert (libfunc
);
4474 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4475 GET_MODE (to
), 1, from
,
4477 insns
= get_insns ();
4480 emit_libcall_block (insns
, target
, value
,
4481 gen_rtx_FLOAT (GET_MODE (to
), from
));
4486 /* Copy result to requested destination
4487 if we have been computing in a temp location. */
4491 if (GET_MODE (target
) == GET_MODE (to
))
4492 emit_move_insn (to
, target
);
4494 convert_move (to
, target
, 0);
4498 /* Generate code to convert FROM to fixed point and store in TO. FROM
4499 must be floating point. */
4502 expand_fix (rtx to
, rtx from
, int unsignedp
)
4504 enum insn_code icode
;
4506 enum machine_mode fmode
, imode
;
4509 /* We first try to find a pair of modes, one real and one integer, at
4510 least as wide as FROM and TO, respectively, in which we can open-code
4511 this conversion. If the integer mode is wider than the mode of TO,
4512 we can do the conversion either signed or unsigned. */
4514 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4515 fmode
= GET_MODE_WIDER_MODE (fmode
))
4516 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4517 imode
= GET_MODE_WIDER_MODE (imode
))
4519 int doing_unsigned
= unsignedp
;
4521 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4522 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4523 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4525 if (icode
!= CODE_FOR_nothing
)
4527 if (fmode
!= GET_MODE (from
))
4528 from
= convert_to_mode (fmode
, from
, 0);
4532 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4533 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4537 if (imode
!= GET_MODE (to
))
4538 target
= gen_reg_rtx (imode
);
4540 emit_unop_insn (icode
, target
, from
,
4541 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4543 convert_move (to
, target
, unsignedp
);
4548 /* For an unsigned conversion, there is one more way to do it.
4549 If we have a signed conversion, we generate code that compares
4550 the real value to the largest representable positive number. If if
4551 is smaller, the conversion is done normally. Otherwise, subtract
4552 one plus the highest signed number, convert, and add it back.
4554 We only need to check all real modes, since we know we didn't find
4555 anything with a wider integer mode.
4557 This code used to extend FP value into mode wider than the destination.
4558 This is not needed. Consider, for instance conversion from SFmode
4561 The hot path trought the code is dealing with inputs smaller than 2^63
4562 and doing just the conversion, so there is no bits to lose.
4564 In the other path we know the value is positive in the range 2^63..2^64-1
4565 inclusive. (as for other imput overflow happens and result is undefined)
4566 So we know that the most important bit set in mantissa corresponds to
4567 2^63. The subtraction of 2^63 should not generate any rounding as it
4568 simply clears out that bit. The rest is trivial. */
4570 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4571 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4572 fmode
= GET_MODE_WIDER_MODE (fmode
))
4573 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4577 REAL_VALUE_TYPE offset
;
4578 rtx limit
, lab1
, lab2
, insn
;
4580 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4581 real_2expN (&offset
, bitsize
- 1);
4582 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4583 lab1
= gen_label_rtx ();
4584 lab2
= gen_label_rtx ();
4586 if (fmode
!= GET_MODE (from
))
4587 from
= convert_to_mode (fmode
, from
, 0);
4589 /* See if we need to do the subtraction. */
4590 do_pending_stack_adjust ();
4591 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4594 /* If not, do the signed "fix" and branch around fixup code. */
4595 expand_fix (to
, from
, 0);
4596 emit_jump_insn (gen_jump (lab2
));
4599 /* Otherwise, subtract 2**(N-1), convert to signed number,
4600 then add 2**(N-1). Do the addition using XOR since this
4601 will often generate better code. */
4603 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4604 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4605 expand_fix (to
, target
, 0);
4606 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4608 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4610 to
, 1, OPTAB_LIB_WIDEN
);
4613 emit_move_insn (to
, target
);
4617 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4618 != CODE_FOR_nothing
)
4620 /* Make a place for a REG_NOTE and add it. */
4621 insn
= emit_move_insn (to
, to
);
4622 set_unique_reg_note (insn
,
4624 gen_rtx_fmt_e (UNSIGNED_FIX
,
4632 /* We can't do it with an insn, so use a library call. But first ensure
4633 that the mode of TO is at least as wide as SImode, since those are the
4634 only library calls we know about. */
4636 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4638 target
= gen_reg_rtx (SImode
);
4640 expand_fix (target
, from
, unsignedp
);
4648 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4649 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4650 gcc_assert (libfunc
);
4654 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4655 GET_MODE (to
), 1, from
,
4657 insns
= get_insns ();
4660 emit_libcall_block (insns
, target
, value
,
4661 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4662 GET_MODE (to
), from
));
4667 if (GET_MODE (to
) == GET_MODE (target
))
4668 emit_move_insn (to
, target
);
4670 convert_move (to
, target
, 0);
4674 /* Report whether we have an instruction to perform the operation
4675 specified by CODE on operands of mode MODE. */
4677 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4679 return (code_to_optab
[(int) code
] != 0
4680 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4681 != CODE_FOR_nothing
));
4684 /* Create a blank optab. */
4689 optab op
= ggc_alloc (sizeof (struct optab
));
4690 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4692 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4693 op
->handlers
[i
].libfunc
= 0;
4699 static convert_optab
4700 new_convert_optab (void)
4703 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4704 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4705 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4707 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4708 op
->handlers
[i
][j
].libfunc
= 0;
4713 /* Same, but fill in its code as CODE, and write it into the
4714 code_to_optab table. */
4716 init_optab (enum rtx_code code
)
4718 optab op
= new_optab ();
4720 code_to_optab
[(int) code
] = op
;
4724 /* Same, but fill in its code as CODE, and do _not_ write it into
4725 the code_to_optab table. */
4727 init_optabv (enum rtx_code code
)
4729 optab op
= new_optab ();
4734 /* Conversion optabs never go in the code_to_optab table. */
4735 static inline convert_optab
4736 init_convert_optab (enum rtx_code code
)
4738 convert_optab op
= new_convert_optab ();
4743 /* Initialize the libfunc fields of an entire group of entries in some
4744 optab. Each entry is set equal to a string consisting of a leading
4745 pair of underscores followed by a generic operation name followed by
4746 a mode name (downshifted to lowercase) followed by a single character
4747 representing the number of operands for the given operation (which is
4748 usually one of the characters '2', '3', or '4').
4750 OPTABLE is the table in which libfunc fields are to be initialized.
4751 FIRST_MODE is the first machine mode index in the given optab to
4753 LAST_MODE is the last machine mode index in the given optab to
4755 OPNAME is the generic (string) name of the operation.
4756 SUFFIX is the character which specifies the number of operands for
4757 the given generic operation.
4761 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4762 const char *opname
, int suffix
)
4765 unsigned opname_len
= strlen (opname
);
4767 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4768 mode
= (enum machine_mode
) ((int) mode
+ 1))
4770 const char *mname
= GET_MODE_NAME (mode
);
4771 unsigned mname_len
= strlen (mname
);
4772 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4779 for (q
= opname
; *q
; )
4781 for (q
= mname
; *q
; q
++)
4782 *p
++ = TOLOWER (*q
);
4786 optable
->handlers
[(int) mode
].libfunc
4787 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4791 /* Initialize the libfunc fields of an entire group of entries in some
4792 optab which correspond to all integer mode operations. The parameters
4793 have the same meaning as similarly named ones for the `init_libfuncs'
4794 routine. (See above). */
4797 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4799 int maxsize
= 2*BITS_PER_WORD
;
4800 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4801 maxsize
= LONG_LONG_TYPE_SIZE
;
4802 init_libfuncs (optable
, word_mode
,
4803 mode_for_size (maxsize
, MODE_INT
, 0),
4807 /* Initialize the libfunc fields of an entire group of entries in some
4808 optab which correspond to all real mode operations. The parameters
4809 have the same meaning as similarly named ones for the `init_libfuncs'
4810 routine. (See above). */
4813 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4815 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4818 /* Initialize the libfunc fields of an entire group of entries of an
4819 inter-mode-class conversion optab. The string formation rules are
4820 similar to the ones for init_libfuncs, above, but instead of having
4821 a mode name and an operand count these functions have two mode names
4822 and no operand count. */
4824 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4825 enum mode_class from_class
,
4826 enum mode_class to_class
)
4828 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4829 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4830 size_t opname_len
= strlen (opname
);
4831 size_t max_mname_len
= 0;
4833 enum machine_mode fmode
, tmode
;
4834 const char *fname
, *tname
;
4836 char *libfunc_name
, *suffix
;
4839 for (fmode
= first_from_mode
;
4841 fmode
= GET_MODE_WIDER_MODE (fmode
))
4842 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4844 for (tmode
= first_to_mode
;
4846 tmode
= GET_MODE_WIDER_MODE (tmode
))
4847 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4849 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4850 libfunc_name
[0] = '_';
4851 libfunc_name
[1] = '_';
4852 memcpy (&libfunc_name
[2], opname
, opname_len
);
4853 suffix
= libfunc_name
+ opname_len
+ 2;
4855 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4856 fmode
= GET_MODE_WIDER_MODE (fmode
))
4857 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4858 tmode
= GET_MODE_WIDER_MODE (tmode
))
4860 fname
= GET_MODE_NAME (fmode
);
4861 tname
= GET_MODE_NAME (tmode
);
4864 for (q
= fname
; *q
; p
++, q
++)
4866 for (q
= tname
; *q
; p
++, q
++)
4871 tab
->handlers
[tmode
][fmode
].libfunc
4872 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4877 /* Initialize the libfunc fields of an entire group of entries of an
4878 intra-mode-class conversion optab. The string formation rules are
4879 similar to the ones for init_libfunc, above. WIDENING says whether
4880 the optab goes from narrow to wide modes or vice versa. These functions
4881 have two mode names _and_ an operand count. */
4883 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4884 enum mode_class
class, bool widening
)
4886 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4887 size_t opname_len
= strlen (opname
);
4888 size_t max_mname_len
= 0;
4890 enum machine_mode nmode
, wmode
;
4891 const char *nname
, *wname
;
4893 char *libfunc_name
, *suffix
;
4896 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4897 nmode
= GET_MODE_WIDER_MODE (nmode
))
4898 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4900 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4901 libfunc_name
[0] = '_';
4902 libfunc_name
[1] = '_';
4903 memcpy (&libfunc_name
[2], opname
, opname_len
);
4904 suffix
= libfunc_name
+ opname_len
+ 2;
4906 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4907 nmode
= GET_MODE_WIDER_MODE (nmode
))
4908 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4909 wmode
= GET_MODE_WIDER_MODE (wmode
))
4911 nname
= GET_MODE_NAME (nmode
);
4912 wname
= GET_MODE_NAME (wmode
);
4915 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4917 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4923 tab
->handlers
[widening
? wmode
: nmode
]
4924 [widening
? nmode
: wmode
].libfunc
4925 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4932 init_one_libfunc (const char *name
)
4936 /* Create a FUNCTION_DECL that can be passed to
4937 targetm.encode_section_info. */
4938 /* ??? We don't have any type information except for this is
4939 a function. Pretend this is "int foo()". */
4940 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4941 build_function_type (integer_type_node
, NULL_TREE
));
4942 DECL_ARTIFICIAL (decl
) = 1;
4943 DECL_EXTERNAL (decl
) = 1;
4944 TREE_PUBLIC (decl
) = 1;
4946 symbol
= XEXP (DECL_RTL (decl
), 0);
4948 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4949 are the flags assigned by targetm.encode_section_info. */
4950 SYMBOL_REF_DECL (symbol
) = 0;
4955 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4956 MODE to NAME, which should be either 0 or a string constant. */
4958 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4961 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4963 optable
->handlers
[mode
].libfunc
= 0;
4966 /* Call this to reset the function entry for one conversion optab
4967 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4968 either 0 or a string constant. */
4970 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4971 enum machine_mode fmode
, const char *name
)
4974 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4976 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4979 /* Call this once to initialize the contents of the optabs
4980 appropriately for the current target machine. */
4987 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4989 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4990 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4992 #ifdef HAVE_conditional_move
4993 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4994 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4997 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4999 vcond_gen_code
[i
] = CODE_FOR_nothing
;
5000 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
5003 add_optab
= init_optab (PLUS
);
5004 addv_optab
= init_optabv (PLUS
);
5005 sub_optab
= init_optab (MINUS
);
5006 subv_optab
= init_optabv (MINUS
);
5007 smul_optab
= init_optab (MULT
);
5008 smulv_optab
= init_optabv (MULT
);
5009 smul_highpart_optab
= init_optab (UNKNOWN
);
5010 umul_highpart_optab
= init_optab (UNKNOWN
);
5011 smul_widen_optab
= init_optab (UNKNOWN
);
5012 umul_widen_optab
= init_optab (UNKNOWN
);
5013 sdiv_optab
= init_optab (DIV
);
5014 sdivv_optab
= init_optabv (DIV
);
5015 sdivmod_optab
= init_optab (UNKNOWN
);
5016 udiv_optab
= init_optab (UDIV
);
5017 udivmod_optab
= init_optab (UNKNOWN
);
5018 smod_optab
= init_optab (MOD
);
5019 umod_optab
= init_optab (UMOD
);
5020 fmod_optab
= init_optab (UNKNOWN
);
5021 drem_optab
= init_optab (UNKNOWN
);
5022 ftrunc_optab
= init_optab (UNKNOWN
);
5023 and_optab
= init_optab (AND
);
5024 ior_optab
= init_optab (IOR
);
5025 xor_optab
= init_optab (XOR
);
5026 ashl_optab
= init_optab (ASHIFT
);
5027 ashr_optab
= init_optab (ASHIFTRT
);
5028 lshr_optab
= init_optab (LSHIFTRT
);
5029 rotl_optab
= init_optab (ROTATE
);
5030 rotr_optab
= init_optab (ROTATERT
);
5031 smin_optab
= init_optab (SMIN
);
5032 smax_optab
= init_optab (SMAX
);
5033 umin_optab
= init_optab (UMIN
);
5034 umax_optab
= init_optab (UMAX
);
5035 pow_optab
= init_optab (UNKNOWN
);
5036 atan2_optab
= init_optab (UNKNOWN
);
5038 /* These three have codes assigned exclusively for the sake of
5040 mov_optab
= init_optab (SET
);
5041 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5042 cmp_optab
= init_optab (COMPARE
);
5044 ucmp_optab
= init_optab (UNKNOWN
);
5045 tst_optab
= init_optab (UNKNOWN
);
5047 eq_optab
= init_optab (EQ
);
5048 ne_optab
= init_optab (NE
);
5049 gt_optab
= init_optab (GT
);
5050 ge_optab
= init_optab (GE
);
5051 lt_optab
= init_optab (LT
);
5052 le_optab
= init_optab (LE
);
5053 unord_optab
= init_optab (UNORDERED
);
5055 neg_optab
= init_optab (NEG
);
5056 negv_optab
= init_optabv (NEG
);
5057 abs_optab
= init_optab (ABS
);
5058 absv_optab
= init_optabv (ABS
);
5059 addcc_optab
= init_optab (UNKNOWN
);
5060 one_cmpl_optab
= init_optab (NOT
);
5061 ffs_optab
= init_optab (FFS
);
5062 clz_optab
= init_optab (CLZ
);
5063 ctz_optab
= init_optab (CTZ
);
5064 popcount_optab
= init_optab (POPCOUNT
);
5065 parity_optab
= init_optab (PARITY
);
5066 sqrt_optab
= init_optab (SQRT
);
5067 floor_optab
= init_optab (UNKNOWN
);
5068 lfloor_optab
= init_optab (UNKNOWN
);
5069 ceil_optab
= init_optab (UNKNOWN
);
5070 lceil_optab
= init_optab (UNKNOWN
);
5071 round_optab
= init_optab (UNKNOWN
);
5072 btrunc_optab
= init_optab (UNKNOWN
);
5073 nearbyint_optab
= init_optab (UNKNOWN
);
5074 rint_optab
= init_optab (UNKNOWN
);
5075 lrint_optab
= init_optab (UNKNOWN
);
5076 sincos_optab
= init_optab (UNKNOWN
);
5077 sin_optab
= init_optab (UNKNOWN
);
5078 asin_optab
= init_optab (UNKNOWN
);
5079 cos_optab
= init_optab (UNKNOWN
);
5080 acos_optab
= init_optab (UNKNOWN
);
5081 exp_optab
= init_optab (UNKNOWN
);
5082 exp10_optab
= init_optab (UNKNOWN
);
5083 exp2_optab
= init_optab (UNKNOWN
);
5084 expm1_optab
= init_optab (UNKNOWN
);
5085 ldexp_optab
= init_optab (UNKNOWN
);
5086 logb_optab
= init_optab (UNKNOWN
);
5087 ilogb_optab
= init_optab (UNKNOWN
);
5088 log_optab
= init_optab (UNKNOWN
);
5089 log10_optab
= init_optab (UNKNOWN
);
5090 log2_optab
= init_optab (UNKNOWN
);
5091 log1p_optab
= init_optab (UNKNOWN
);
5092 tan_optab
= init_optab (UNKNOWN
);
5093 atan_optab
= init_optab (UNKNOWN
);
5094 copysign_optab
= init_optab (UNKNOWN
);
5096 strlen_optab
= init_optab (UNKNOWN
);
5097 cbranch_optab
= init_optab (UNKNOWN
);
5098 cmov_optab
= init_optab (UNKNOWN
);
5099 cstore_optab
= init_optab (UNKNOWN
);
5100 push_optab
= init_optab (UNKNOWN
);
5102 reduc_smax_optab
= init_optab (UNKNOWN
);
5103 reduc_umax_optab
= init_optab (UNKNOWN
);
5104 reduc_smin_optab
= init_optab (UNKNOWN
);
5105 reduc_umin_optab
= init_optab (UNKNOWN
);
5106 reduc_splus_optab
= init_optab (UNKNOWN
);
5107 reduc_uplus_optab
= init_optab (UNKNOWN
);
5109 vec_extract_optab
= init_optab (UNKNOWN
);
5110 vec_set_optab
= init_optab (UNKNOWN
);
5111 vec_init_optab
= init_optab (UNKNOWN
);
5112 vec_shl_optab
= init_optab (UNKNOWN
);
5113 vec_shr_optab
= init_optab (UNKNOWN
);
5114 vec_realign_load_optab
= init_optab (UNKNOWN
);
5115 movmisalign_optab
= init_optab (UNKNOWN
);
5117 powi_optab
= init_optab (UNKNOWN
);
5120 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5121 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5122 trunc_optab
= init_convert_optab (TRUNCATE
);
5123 sfix_optab
= init_convert_optab (FIX
);
5124 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5125 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5126 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5127 sfloat_optab
= init_convert_optab (FLOAT
);
5128 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5130 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5132 movmem_optab
[i
] = CODE_FOR_nothing
;
5133 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5134 cmpstrn_optab
[i
] = CODE_FOR_nothing
;
5135 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5136 setmem_optab
[i
] = CODE_FOR_nothing
;
5138 sync_add_optab
[i
] = CODE_FOR_nothing
;
5139 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5140 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5141 sync_and_optab
[i
] = CODE_FOR_nothing
;
5142 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5143 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5144 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5145 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5146 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5147 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5148 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5149 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5150 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5151 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5152 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5153 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5154 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5155 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5156 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5157 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5158 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5159 sync_lock_release
[i
] = CODE_FOR_nothing
;
5161 #ifdef HAVE_SECONDARY_RELOADS
5162 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5166 /* Fill in the optabs with the insns we support. */
5169 /* Initialize the optabs with the names of the library functions. */
5170 init_integral_libfuncs (add_optab
, "add", '3');
5171 init_floating_libfuncs (add_optab
, "add", '3');
5172 init_integral_libfuncs (addv_optab
, "addv", '3');
5173 init_floating_libfuncs (addv_optab
, "add", '3');
5174 init_integral_libfuncs (sub_optab
, "sub", '3');
5175 init_floating_libfuncs (sub_optab
, "sub", '3');
5176 init_integral_libfuncs (subv_optab
, "subv", '3');
5177 init_floating_libfuncs (subv_optab
, "sub", '3');
5178 init_integral_libfuncs (smul_optab
, "mul", '3');
5179 init_floating_libfuncs (smul_optab
, "mul", '3');
5180 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5181 init_floating_libfuncs (smulv_optab
, "mul", '3');
5182 init_integral_libfuncs (sdiv_optab
, "div", '3');
5183 init_floating_libfuncs (sdiv_optab
, "div", '3');
5184 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5185 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5186 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5187 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5188 init_integral_libfuncs (smod_optab
, "mod", '3');
5189 init_integral_libfuncs (umod_optab
, "umod", '3');
5190 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5191 init_integral_libfuncs (and_optab
, "and", '3');
5192 init_integral_libfuncs (ior_optab
, "ior", '3');
5193 init_integral_libfuncs (xor_optab
, "xor", '3');
5194 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5195 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5196 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5197 init_integral_libfuncs (smin_optab
, "min", '3');
5198 init_floating_libfuncs (smin_optab
, "min", '3');
5199 init_integral_libfuncs (smax_optab
, "max", '3');
5200 init_floating_libfuncs (smax_optab
, "max", '3');
5201 init_integral_libfuncs (umin_optab
, "umin", '3');
5202 init_integral_libfuncs (umax_optab
, "umax", '3');
5203 init_integral_libfuncs (neg_optab
, "neg", '2');
5204 init_floating_libfuncs (neg_optab
, "neg", '2');
5205 init_integral_libfuncs (negv_optab
, "negv", '2');
5206 init_floating_libfuncs (negv_optab
, "neg", '2');
5207 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5208 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5209 init_integral_libfuncs (clz_optab
, "clz", '2');
5210 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5211 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5212 init_integral_libfuncs (parity_optab
, "parity", '2');
5214 /* Comparison libcalls for integers MUST come in pairs,
5216 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5217 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5218 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5220 /* EQ etc are floating point only. */
5221 init_floating_libfuncs (eq_optab
, "eq", '2');
5222 init_floating_libfuncs (ne_optab
, "ne", '2');
5223 init_floating_libfuncs (gt_optab
, "gt", '2');
5224 init_floating_libfuncs (ge_optab
, "ge", '2');
5225 init_floating_libfuncs (lt_optab
, "lt", '2');
5226 init_floating_libfuncs (le_optab
, "le", '2');
5227 init_floating_libfuncs (unord_optab
, "unord", '2');
5229 init_floating_libfuncs (powi_optab
, "powi", '2');
5232 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5233 MODE_INT
, MODE_FLOAT
);
5234 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5235 MODE_FLOAT
, MODE_INT
);
5236 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5237 MODE_FLOAT
, MODE_INT
);
5239 /* sext_optab is also used for FLOAT_EXTEND. */
5240 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5241 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5243 /* Use cabs for double complex abs, since systems generally have cabs.
5244 Don't define any libcall for float complex, so that cabs will be used. */
5245 if (complex_double_type_node
)
5246 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5247 = init_one_libfunc ("cabs");
5249 /* The ffs function operates on `int'. */
5250 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5251 = init_one_libfunc ("ffs");
5253 abort_libfunc
= init_one_libfunc ("abort");
5254 memcpy_libfunc
= init_one_libfunc ("memcpy");
5255 memmove_libfunc
= init_one_libfunc ("memmove");
5256 memcmp_libfunc
= init_one_libfunc ("memcmp");
5257 memset_libfunc
= init_one_libfunc ("memset");
5258 setbits_libfunc
= init_one_libfunc ("__setbits");
5260 #ifndef DONT_USE_BUILTIN_SETJMP
5261 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5262 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5264 setjmp_libfunc
= init_one_libfunc ("setjmp");
5265 longjmp_libfunc
= init_one_libfunc ("longjmp");
5267 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5268 unwind_sjlj_unregister_libfunc
5269 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5271 /* For function entry/exit instrumentation. */
5272 profile_function_entry_libfunc
5273 = init_one_libfunc ("__cyg_profile_func_enter");
5274 profile_function_exit_libfunc
5275 = init_one_libfunc ("__cyg_profile_func_exit");
5277 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5279 if (HAVE_conditional_trap
)
5280 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5282 /* Allow the target to add more libcalls or rename some, etc. */
5283 targetm
.init_libfuncs ();
5288 /* Print information about the current contents of the optabs on
5292 debug_optab_libfuncs (void)
5298 /* Dump the arithmetic optabs. */
5299 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5300 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5303 struct optab_handlers
*h
;
5306 h
= &o
->handlers
[j
];
5309 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5310 fprintf (stderr
, "%s\t%s:\t%s\n",
5311 GET_RTX_NAME (o
->code
),
5313 XSTR (h
->libfunc
, 0));
5317 /* Dump the conversion optabs. */
5318 for (i
= 0; i
< (int) COI_MAX
; ++i
)
5319 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5320 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5323 struct optab_handlers
*h
;
5325 o
= &convert_optab_table
[i
];
5326 h
= &o
->handlers
[j
][k
];
5329 gcc_assert (GET_CODE (h
->libfunc
) = SYMBOL_REF
);
5330 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5331 GET_RTX_NAME (o
->code
),
5334 XSTR (h
->libfunc
, 0));
5342 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5343 CODE. Return 0 on failure. */
5346 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5347 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5349 enum machine_mode mode
= GET_MODE (op1
);
5350 enum insn_code icode
;
5353 if (!HAVE_conditional_trap
)
5356 if (mode
== VOIDmode
)
5359 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5360 if (icode
== CODE_FOR_nothing
)
5364 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5365 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5371 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5373 PUT_CODE (trap_rtx
, code
);
5374 gcc_assert (HAVE_conditional_trap
);
5375 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5379 insn
= get_insns ();
5386 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5387 or unsigned operation code. */
5389 static enum rtx_code
5390 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5402 code
= unsignedp
? LTU
: LT
;
5405 code
= unsignedp
? LEU
: LE
;
5408 code
= unsignedp
? GTU
: GT
;
5411 code
= unsignedp
? GEU
: GE
;
5414 case UNORDERED_EXPR
:
5445 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5446 unsigned operators. Do not generate compare instruction. */
5449 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5451 enum rtx_code rcode
;
5453 rtx rtx_op0
, rtx_op1
;
5455 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5456 ensures that condition is a relational operation. */
5457 gcc_assert (COMPARISON_CLASS_P (cond
));
5459 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5460 t_op0
= TREE_OPERAND (cond
, 0);
5461 t_op1
= TREE_OPERAND (cond
, 1);
5463 /* Expand operands. */
5464 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5465 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5467 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5468 && GET_MODE (rtx_op0
) != VOIDmode
)
5469 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5471 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5472 && GET_MODE (rtx_op1
) != VOIDmode
)
5473 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5475 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5478 /* Return insn code for VEC_COND_EXPR EXPR. */
5480 static inline enum insn_code
5481 get_vcond_icode (tree expr
, enum machine_mode mode
)
5483 enum insn_code icode
= CODE_FOR_nothing
;
5485 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5486 icode
= vcondu_gen_code
[mode
];
5488 icode
= vcond_gen_code
[mode
];
5492 /* Return TRUE iff, appropriate vector insns are available
5493 for vector cond expr expr in VMODE mode. */
5496 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5498 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5503 /* Generate insns for VEC_COND_EXPR. */
5506 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5508 enum insn_code icode
;
5509 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5510 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5511 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5513 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5514 if (icode
== CODE_FOR_nothing
)
5517 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5518 target
= gen_reg_rtx (mode
);
5520 /* Get comparison rtx. First expand both cond expr operands. */
5521 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5523 cc_op0
= XEXP (comparison
, 0);
5524 cc_op1
= XEXP (comparison
, 1);
5525 /* Expand both operands and force them in reg, if required. */
5526 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5527 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5528 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
5529 && mode
!= VOIDmode
)
5530 rtx_op1
= force_reg (mode
, rtx_op1
);
5532 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5533 NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
5534 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
5535 && mode
!= VOIDmode
)
5536 rtx_op2
= force_reg (mode
, rtx_op2
);
5538 /* Emit instruction! */
5539 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5540 comparison
, cc_op0
, cc_op1
));
5546 /* This is an internal subroutine of the other compare_and_swap expanders.
5547 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5548 operation. TARGET is an optional place to store the value result of
5549 the operation. ICODE is the particular instruction to expand. Return
5550 the result of the operation. */
5553 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5554 rtx target
, enum insn_code icode
)
5556 enum machine_mode mode
= GET_MODE (mem
);
5559 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5560 target
= gen_reg_rtx (mode
);
5562 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5563 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5564 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5565 old_val
= force_reg (mode
, old_val
);
5567 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5568 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5569 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5570 new_val
= force_reg (mode
, new_val
);
5572 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5573 if (insn
== NULL_RTX
)
5580 /* Expand a compare-and-swap operation and return its value. */
5583 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5585 enum machine_mode mode
= GET_MODE (mem
);
5586 enum insn_code icode
= sync_compare_and_swap
[mode
];
5588 if (icode
== CODE_FOR_nothing
)
5591 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
5594 /* Expand a compare-and-swap operation and store true into the result if
5595 the operation was successful and false otherwise. Return the result.
5596 Unlike other routines, TARGET is not optional. */
5599 expand_bool_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5601 enum machine_mode mode
= GET_MODE (mem
);
5602 enum insn_code icode
;
5603 rtx subtarget
, label0
, label1
;
5605 /* If the target supports a compare-and-swap pattern that simultaneously
5606 sets some flag for success, then use it. Otherwise use the regular
5607 compare-and-swap and follow that immediately with a compare insn. */
5608 icode
= sync_compare_and_swap_cc
[mode
];
5612 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5614 if (subtarget
!= NULL_RTX
)
5618 case CODE_FOR_nothing
:
5619 icode
= sync_compare_and_swap
[mode
];
5620 if (icode
== CODE_FOR_nothing
)
5623 /* Ensure that if old_val == mem, that we're not comparing
5624 against an old value. */
5625 if (MEM_P (old_val
))
5626 old_val
= force_reg (mode
, old_val
);
5628 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5630 if (subtarget
== NULL_RTX
)
5633 emit_cmp_insn (subtarget
, old_val
, EQ
, const0_rtx
, mode
, true);
5636 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5637 setcc instruction from the beginning. We don't work too hard here,
5638 but it's nice to not be stupid about initial code gen either. */
5639 if (STORE_FLAG_VALUE
== 1)
5641 icode
= setcc_gen_code
[EQ
];
5642 if (icode
!= CODE_FOR_nothing
)
5644 enum machine_mode cmode
= insn_data
[icode
].operand
[0].mode
;
5648 if (!insn_data
[icode
].operand
[0].predicate (target
, cmode
))
5649 subtarget
= gen_reg_rtx (cmode
);
5651 insn
= GEN_FCN (icode
) (subtarget
);
5655 if (GET_MODE (target
) != GET_MODE (subtarget
))
5657 convert_move (target
, subtarget
, 1);
5665 /* Without an appropriate setcc instruction, use a set of branches to
5666 get 1 and 0 stored into target. Presumably if the target has a
5667 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5669 label0
= gen_label_rtx ();
5670 label1
= gen_label_rtx ();
5672 emit_jump_insn (bcc_gen_fctn
[EQ
] (label0
));
5673 emit_move_insn (target
, const0_rtx
);
5674 emit_jump_insn (gen_jump (label1
));
5676 emit_label (label0
);
5677 emit_move_insn (target
, const1_rtx
);
5678 emit_label (label1
);
5683 /* This is a helper function for the other atomic operations. This function
5684 emits a loop that contains SEQ that iterates until a compare-and-swap
5685 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5686 a set of instructions that takes a value from OLD_REG as an input and
5687 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5688 set to the current contents of MEM. After SEQ, a compare-and-swap will
5689 attempt to update MEM with NEW_REG. The function returns true when the
5690 loop was generated successfully. */
5693 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5695 enum machine_mode mode
= GET_MODE (mem
);
5696 enum insn_code icode
;
5697 rtx label
, cmp_reg
, subtarget
;
5699 /* The loop we want to generate looks like
5705 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5706 if (cmp_reg != old_reg)
5709 Note that we only do the plain load from memory once. Subsequent
5710 iterations use the value loaded by the compare-and-swap pattern. */
5712 label
= gen_label_rtx ();
5713 cmp_reg
= gen_reg_rtx (mode
);
5715 emit_move_insn (cmp_reg
, mem
);
5717 emit_move_insn (old_reg
, cmp_reg
);
5721 /* If the target supports a compare-and-swap pattern that simultaneously
5722 sets some flag for success, then use it. Otherwise use the regular
5723 compare-and-swap and follow that immediately with a compare insn. */
5724 icode
= sync_compare_and_swap_cc
[mode
];
5728 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5730 if (subtarget
!= NULL_RTX
)
5732 gcc_assert (subtarget
== cmp_reg
);
5737 case CODE_FOR_nothing
:
5738 icode
= sync_compare_and_swap
[mode
];
5739 if (icode
== CODE_FOR_nothing
)
5742 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5744 if (subtarget
== NULL_RTX
)
5746 if (subtarget
!= cmp_reg
)
5747 emit_move_insn (cmp_reg
, subtarget
);
5749 emit_cmp_insn (cmp_reg
, old_reg
, EQ
, const0_rtx
, mode
, true);
5752 /* ??? Mark this jump predicted not taken? */
5753 emit_jump_insn (bcc_gen_fctn
[NE
] (label
));
5758 /* This function generates the atomic operation MEM CODE= VAL. In this
5759 case, we do not care about any resulting value. Returns NULL if we
5760 cannot generate the operation. */
5763 expand_sync_operation (rtx mem
, rtx val
, enum rtx_code code
)
5765 enum machine_mode mode
= GET_MODE (mem
);
5766 enum insn_code icode
;
5769 /* Look to see if the target supports the operation directly. */
5773 icode
= sync_add_optab
[mode
];
5776 icode
= sync_ior_optab
[mode
];
5779 icode
= sync_xor_optab
[mode
];
5782 icode
= sync_and_optab
[mode
];
5785 icode
= sync_nand_optab
[mode
];
5789 icode
= sync_sub_optab
[mode
];
5790 if (icode
== CODE_FOR_nothing
)
5792 icode
= sync_add_optab
[mode
];
5793 if (icode
!= CODE_FOR_nothing
)
5795 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
5805 /* Generate the direct operation, if present. */
5806 if (icode
!= CODE_FOR_nothing
)
5808 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5809 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5810 if (!insn_data
[icode
].operand
[1].predicate (val
, mode
))
5811 val
= force_reg (mode
, val
);
5813 insn
= GEN_FCN (icode
) (mem
, val
);
5821 /* Failing that, generate a compare-and-swap loop in which we perform the
5822 operation with normal arithmetic instructions. */
5823 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5825 rtx t0
= gen_reg_rtx (mode
), t1
;
5832 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5835 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5836 true, OPTAB_LIB_WIDEN
);
5838 insn
= get_insns ();
5841 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
5848 /* This function generates the atomic operation MEM CODE= VAL. In this
5849 case, we do care about the resulting value: if AFTER is true then
5850 return the value MEM holds after the operation, if AFTER is false
5851 then return the value MEM holds before the operation. TARGET is an
5852 optional place for the result value to be stored. */
5855 expand_sync_fetch_operation (rtx mem
, rtx val
, enum rtx_code code
,
5856 bool after
, rtx target
)
5858 enum machine_mode mode
= GET_MODE (mem
);
5859 enum insn_code old_code
, new_code
, icode
;
5863 /* Look to see if the target supports the operation directly. */
5867 old_code
= sync_old_add_optab
[mode
];
5868 new_code
= sync_new_add_optab
[mode
];
5871 old_code
= sync_old_ior_optab
[mode
];
5872 new_code
= sync_new_ior_optab
[mode
];
5875 old_code
= sync_old_xor_optab
[mode
];
5876 new_code
= sync_new_xor_optab
[mode
];
5879 old_code
= sync_old_and_optab
[mode
];
5880 new_code
= sync_new_and_optab
[mode
];
5883 old_code
= sync_old_nand_optab
[mode
];
5884 new_code
= sync_new_nand_optab
[mode
];
5888 old_code
= sync_old_sub_optab
[mode
];
5889 new_code
= sync_new_sub_optab
[mode
];
5890 if (old_code
== CODE_FOR_nothing
&& new_code
== CODE_FOR_nothing
)
5892 old_code
= sync_old_add_optab
[mode
];
5893 new_code
= sync_new_add_optab
[mode
];
5894 if (old_code
!= CODE_FOR_nothing
|| new_code
!= CODE_FOR_nothing
)
5896 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
5906 /* If the target does supports the proper new/old operation, great. But
5907 if we only support the opposite old/new operation, check to see if we
5908 can compensate. In the case in which the old value is supported, then
5909 we can always perform the operation again with normal arithmetic. In
5910 the case in which the new value is supported, then we can only handle
5911 this in the case the operation is reversible. */
5916 if (icode
== CODE_FOR_nothing
)
5919 if (icode
!= CODE_FOR_nothing
)
5926 if (icode
== CODE_FOR_nothing
5927 && (code
== PLUS
|| code
== MINUS
|| code
== XOR
))
5930 if (icode
!= CODE_FOR_nothing
)
5935 /* If we found something supported, great. */
5936 if (icode
!= CODE_FOR_nothing
)
5938 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5939 target
= gen_reg_rtx (mode
);
5941 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5942 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5943 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5944 val
= force_reg (mode
, val
);
5946 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5951 /* If we need to compensate for using an operation with the
5952 wrong return value, do so now. */
5959 else if (code
== MINUS
)
5964 target
= expand_simple_unop (mode
, NOT
, target
, NULL_RTX
, true);
5965 target
= expand_simple_binop (mode
, code
, target
, val
, NULL_RTX
,
5966 true, OPTAB_LIB_WIDEN
);
5973 /* Failing that, generate a compare-and-swap loop in which we perform the
5974 operation with normal arithmetic instructions. */
5975 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5977 rtx t0
= gen_reg_rtx (mode
), t1
;
5979 if (!target
|| !register_operand (target
, mode
))
5980 target
= gen_reg_rtx (mode
);
5985 emit_move_insn (target
, t0
);
5989 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5992 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5993 true, OPTAB_LIB_WIDEN
);
5995 emit_move_insn (target
, t1
);
5997 insn
= get_insns ();
6000 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
6007 /* This function expands a test-and-set operation. Ideally we atomically
6008 store VAL in MEM and return the previous value in MEM. Some targets
6009 may not support this operation and only support VAL with the constant 1;
6010 in this case while the return value will be 0/1, but the exact value
6011 stored in MEM is target defined. TARGET is an option place to stick
6012 the return value. */
6015 expand_sync_lock_test_and_set (rtx mem
, rtx val
, rtx target
)
6017 enum machine_mode mode
= GET_MODE (mem
);
6018 enum insn_code icode
;
6021 /* If the target supports the test-and-set directly, great. */
6022 icode
= sync_lock_test_and_set
[mode
];
6023 if (icode
!= CODE_FOR_nothing
)
6025 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6026 target
= gen_reg_rtx (mode
);
6028 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6029 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6030 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
6031 val
= force_reg (mode
, val
);
6033 insn
= GEN_FCN (icode
) (target
, mem
, val
);
6041 /* Otherwise, use a compare-and-swap loop for the exchange. */
6042 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6044 if (!target
|| !register_operand (target
, mode
))
6045 target
= gen_reg_rtx (mode
);
6046 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6047 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6048 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6055 #include "gt-optabs.h"