/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                            enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                            enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
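/* For example (an illustrative note, not from the original source): an AND
   of two QImode values widened to SImode can leave bits 8-31 of the inputs
   undefined, because only the low 8 bits of the result are ever used; a
   right shift, by contrast, pulls high-order bits down into the result, so
   its operands must be genuinely extended.  */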
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
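/* For example, TRUNC_DIV_EXPR on an unsigned integer type maps to
   udiv_optab, while PLUS_EXPR on a signed integral type compiled with
   -ftrapv maps to addv_optab, as the cases below show.  */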
optab
optab_for_tree_code (enum tree_code code, tree type)
{
      return one_cmpl_optab;

      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);

      return trapv ? addv_optab : add_optab;

      return trapv ? subv_optab : sub_optab;

      return trapv ? smulv_optab : smul_optab;

      return trapv ? negv_optab : neg_optab;

      return trapv ? absv_optab : abs_optab;
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
352 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
353 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
355 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
356 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
357 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
358 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
361 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
363 gcc_assert (ternary_optab
->handlers
[(int) mode
].insn_code
364 != CODE_FOR_nothing
);
366 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
367 temp
= gen_reg_rtx (mode
);
  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */
377 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
378 xop0
= convert_modes (mode0
,
379 GET_MODE (op0
) != VOIDmode
384 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
385 xop1
= convert_modes (mode1
,
386 GET_MODE (op1
) != VOIDmode
391 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
392 xop2
= convert_modes (mode2
,
393 GET_MODE (op2
) != VOIDmode
  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */
401 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
402 && mode0
!= VOIDmode
)
403 xop0
= copy_to_mode_reg (mode0
, xop0
);
405 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
406 && mode1
!= VOIDmode
)
407 xop1
= copy_to_mode_reg (mode1
, xop1
);
409 if (!insn_data
[icode
].operand
[3].predicate (xop2
, mode2
)
410 && mode2
!= VOIDmode
)
411 xop2
= copy_to_mode_reg (mode2
, xop2
);
413 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */
455 expand_vec_shift_expr (tree vec_shift_expr
, rtx target
)
457 enum insn_code icode
;
458 rtx rtx_op1
, rtx_op2
;
459 enum machine_mode mode1
;
460 enum machine_mode mode2
;
461 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_shift_expr
));
462 tree vec_oprnd
= TREE_OPERAND (vec_shift_expr
, 0);
463 tree shift_oprnd
= TREE_OPERAND (vec_shift_expr
, 1);
467 switch (TREE_CODE (vec_shift_expr
))
469 case VEC_RSHIFT_EXPR
:
470 shift_optab
= vec_shr_optab
;
472 case VEC_LSHIFT_EXPR
:
473 shift_optab
= vec_shl_optab
;
479 icode
= (int) shift_optab
->handlers
[(int) mode
].insn_code
;
480 gcc_assert (icode
!= CODE_FOR_nothing
);
482 mode1
= insn_data
[icode
].operand
[1].mode
;
483 mode2
= insn_data
[icode
].operand
[2].mode
;
485 rtx_op1
= expand_expr (vec_oprnd
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
486 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode1
)
487 && mode1
!= VOIDmode
)
488 rtx_op1
= force_reg (mode1
, rtx_op1
);
490 rtx_op2
= expand_expr (shift_oprnd
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
491 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode2
)
492 && mode2
!= VOIDmode
)
493 rtx_op2
= force_reg (mode2
, rtx_op2
);
496 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
497 target
= gen_reg_rtx (mode
);
499 /* Emit instruction */
500 pat
= GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
);
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
514 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
515 rtx outof_target
, rtx into_target
,
516 int unsignedp
, enum optab_methods methods
)
518 if (into_target
!= 0)
519 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
520 into_target
, unsignedp
, methods
))
523 if (outof_target
!= 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab
!= ashr_optab
)
528 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
530 if (!force_expand_binop (word_mode
, binoptab
,
531 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
532 outof_target
, unsignedp
, methods
))
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
543 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
544 rtx outof_input
, rtx into_input
, rtx op1
,
545 rtx outof_target
, rtx into_target
,
546 int unsignedp
, enum optab_methods methods
,
547 unsigned HOST_WIDE_INT shift_mask
)
549 optab reverse_unsigned_shift
, unsigned_shift
;
552 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
553 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
558 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
560 carries
= outof_input
;
561 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
562 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
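      /* Concretely (an illustration, not part of the original code): with
	 BITS_PER_WORD == 32 and shift counts truncated to 5 bits,
	 (X >> 1) >> (~N & 31) equals X >> (32 - N) for N in [1, 31], so the
	 two-step shift below recovers the carried-out bits without ever
	 using a shift count of 32.  */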
572 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
573 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
574 if (shift_mask
== BITS_PER_WORD
- 1)
576 tmp
= immed_double_const (-1, -1, op1_mode
);
577 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
582 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
583 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
587 if (tmp
== 0 || carries
== 0)
589 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
590 carries
, tmp
, 0, unsignedp
, methods
);
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
597 into_target
, unsignedp
, methods
);
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
603 into_target
, unsignedp
, methods
))
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target
!= 0)
608 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
609 outof_target
, unsignedp
, methods
))
616 #ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode
, optab binoptab
,
625 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
626 rtx outof_input
, rtx into_input
,
627 rtx subword_op1
, rtx superword_op1
,
628 rtx outof_target
, rtx into_target
,
629 int unsignedp
, enum optab_methods methods
,
630 unsigned HOST_WIDE_INT shift_mask
)
632 rtx outof_superword
, into_superword
;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
636 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
637 if (outof_target
!= 0 && subword_op1
== superword_op1
)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword
= outof_target
;
642 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
643 outof_superword
, 0, unsignedp
, methods
))
648 into_superword
= gen_reg_rtx (word_mode
);
649 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
650 outof_superword
, into_superword
,
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode
, binoptab
,
657 outof_input
, into_input
, subword_op1
,
658 outof_target
, into_target
,
659 unsignedp
, methods
, shift_mask
))
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
665 into_target
, into_superword
, word_mode
, false))
668 if (outof_target
!= 0)
669 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
670 outof_target
, outof_superword
,
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
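/* Illustration (not from the original source; assume BITS_PER_WORD == 32
   for concreteness): for a subword count N in [1, 31], expand_subword_shift
   produces roughly

       INTO_TARGET  = (INTO_INPUT <logical shift> N)
                      | (OUTOF_INPUT <reverse logical shift> (32 - N));
       OUTOF_TARGET = OUTOF_INPUT <requested shift> N;

   and for a superword count N in [32, 63], expand_superword_shift produces

       INTO_TARGET  = OUTOF_INPUT <requested shift> (N - 32);
       OUTOF_TARGET = zeros, or copies of the sign bit for ashr;

   with this routine choosing between the two: at compile time when the
   count is known, otherwise with conditional moves or a branch.  */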
708 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
709 rtx outof_input
, rtx into_input
, rtx op1
,
710 rtx outof_target
, rtx into_target
,
711 int unsignedp
, enum optab_methods methods
,
712 unsigned HOST_WIDE_INT shift_mask
)
714 rtx superword_op1
, tmp
, cmp1
, cmp2
;
715 rtx subword_label
, done_label
;
716 enum rtx_code cmp_code
;
  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
726 if (shift_mask
>= BITS_PER_WORD
728 && !CONSTANT_P (op1
))
730 if (!expand_doubleword_shift (op1_mode
, binoptab
,
731 outof_input
, into_input
, op1
,
733 unsignedp
, methods
, shift_mask
))
735 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
736 outof_target
, unsignedp
, methods
))
  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
745 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
746 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
752 cmp2
= CONST0_RTX (op1_mode
);
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
761 cmp2
= CONST0_RTX (op1_mode
);
763 superword_op1
= cmp1
;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
771 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
773 if (tmp
== const0_rtx
)
774 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
775 outof_target
, into_target
,
778 return expand_subword_shift (op1_mode
, binoptab
,
779 outof_input
, into_input
, op1
,
780 outof_target
, into_target
,
781 unsignedp
, methods
, shift_mask
);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start
= get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
789 cmp_code
, cmp1
, cmp2
,
790 outof_input
, into_input
,
792 outof_target
, into_target
,
793 unsignedp
, methods
, shift_mask
))
795 delete_insns_since (start
);
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label
= gen_label_rtx ();
801 done_label
= gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
804 0, 0, subword_label
);
806 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
807 outof_target
, into_target
,
811 emit_jump_insn (gen_jump (done_label
));
813 emit_label (subword_label
);
815 if (!expand_subword_shift (op1_mode
, binoptab
,
816 outof_input
, into_input
, op1
,
817 outof_target
, into_target
,
818 unsignedp
, methods
, shift_mask
))
821 emit_label (done_label
);
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
            _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                 _______________________
    (2a)        [__op0_low__*__op1_high_]
                 _______________________
    (2b)        [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
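/* A compact way to state the decomposition above (an illustrative note,
   not part of the original source; B stands for 2**BITS_PER_WORD):

       op0 = op0_high * B + op0_low
       op1 = op1_high * B + op1_low

       (op0 * op1) mod B**2
         = (op0_low * op1_low
            + (op0_low * op1_high + op0_high * op1_low) * B) mod B**2

   so only product (1) needs both of its result words, while (2a) and (2b)
   contribute just their low-order words, added into the high word of (1).  */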
882 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
883 bool umulp
, enum optab_methods methods
)
885 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
886 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
887 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
888 rtx product
, adjust
, product_high
, temp
;
890 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
891 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
892 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
893 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */
907 /* ??? This could be done with emit_store_flag where available. */
908 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
909 NULL_RTX
, 1, methods
);
911 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
912 NULL_RTX
, 0, OPTAB_DIRECT
);
915 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
916 NULL_RTX
, 0, methods
);
919 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
920 NULL_RTX
, 0, OPTAB_DIRECT
);
927 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
928 NULL_RTX
, 0, OPTAB_DIRECT
);
932 /* OP0_HIGH should now be dead. */
936 /* ??? This could be done with emit_store_flag where available. */
937 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
938 NULL_RTX
, 1, methods
);
940 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
941 NULL_RTX
, 0, OPTAB_DIRECT
);
944 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
945 NULL_RTX
, 0, methods
);
948 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
949 NULL_RTX
, 0, OPTAB_DIRECT
);
956 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
957 NULL_RTX
, 0, OPTAB_DIRECT
);
961 /* OP1_HIGH should now be dead. */
963 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
964 adjust
, 0, OPTAB_DIRECT
);
966 if (target
&& !REG_P (target
))
970 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
971 target
, 1, OPTAB_DIRECT
);
973 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
974 target
, 1, OPTAB_DIRECT
);
979 product_high
= operand_subword (product
, high
, 1, mode
);
980 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
981 REG_P (product_high
) ? product_high
: adjust
,
983 emit_move_insn (product_high
, adjust
);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
1013 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1014 rtx target
, int unsignedp
, enum optab_methods methods
)
1016 enum optab_methods next_methods
1017 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1018 ? OPTAB_WIDEN
: methods
);
1019 enum mode_class
class;
1020 enum machine_mode wider_mode
;
1022 int commutative_op
= 0;
1023 int shift_op
= (binoptab
->code
== ASHIFT
1024 || binoptab
->code
== ASHIFTRT
1025 || binoptab
->code
== LSHIFTRT
1026 || binoptab
->code
== ROTATE
1027 || binoptab
->code
== ROTATERT
);
1028 rtx entry_last
= get_last_insn ();
1031 class = GET_MODE_CLASS (mode
);
1033 /* If subtracting an integer constant, convert this into an addition of
1034 the negated constant. */
1036 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
1038 op1
= negate_rtx (mode
, op1
);
1039 binoptab
= add_optab
;
1042 /* If we are inside an appropriately-short loop and we are optimizing,
1043 force expensive constants into a register. */
1044 if (CONSTANT_P (op0
) && optimize
1045 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1047 if (GET_MODE (op0
) != VOIDmode
)
1048 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
1049 op0
= force_reg (mode
, op0
);
1052 if (CONSTANT_P (op1
) && optimize
1053 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1055 if (GET_MODE (op1
) != VOIDmode
)
1056 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1057 op1
= force_reg (mode
, op1
);
1060 /* Record where to delete back to if we backtrack. */
1061 last
= get_last_insn ();
1063 /* If operation is commutative,
1064 try to make the first operand a register.
1065 Even better, try to make it the same as the target.
1066 Also try to make the last operand a constant. */
1067 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1068 || binoptab
== smul_widen_optab
1069 || binoptab
== umul_widen_optab
1070 || binoptab
== smul_highpart_optab
1071 || binoptab
== umul_highpart_optab
)
1075 if (((target
== 0 || REG_P (target
))
1079 : rtx_equal_p (op1
, target
))
1080 || GET_CODE (op0
) == CONST_INT
)
1088 /* If we can do it with a three-operand insn, do so. */
1090 if (methods
!= OPTAB_MUST_WIDEN
1091 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1093 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1094 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1095 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1097 rtx xop0
= op0
, xop1
= op1
;
1102 temp
= gen_reg_rtx (mode
);
1104 /* If it is a commutative operator and the modes would match
1105 if we would swap the operands, we can save the conversions. */
1108 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1109 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1113 tmp
= op0
; op0
= op1
; op1
= tmp
;
1114 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1118 /* In case the insn wants input operands in modes different from
1119 those of the actual operands, convert the operands. It would
1120 seem that we don't need to convert CONST_INTs, but we do, so
1121 that they're properly zero-extended, sign-extended or truncated
1124 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1125 xop0
= convert_modes (mode0
,
1126 GET_MODE (op0
) != VOIDmode
1131 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1132 xop1
= convert_modes (mode1
,
1133 GET_MODE (op1
) != VOIDmode
1138 /* Now, if insn's predicates don't allow our operands, put them into
1141 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
1142 && mode0
!= VOIDmode
)
1143 xop0
= copy_to_mode_reg (mode0
, xop0
);
1145 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
1146 && mode1
!= VOIDmode
)
1147 xop1
= copy_to_mode_reg (mode1
, xop1
);
1149 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
1150 temp
= gen_reg_rtx (mode
);
1152 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1155 /* If PAT is composed of more than one insn, try to add an appropriate
1156 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1157 operand, call ourselves again, this time without a target. */
1158 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1159 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1161 delete_insns_since (last
);
1162 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1163 unsignedp
, methods
);
1170 delete_insns_since (last
);
1173 /* If this is a multiply, see if we can do a widening operation that
1174 takes operands of this mode and makes a wider mode. */
1176 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
1177 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1178 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1179 != CODE_FOR_nothing
))
1181 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1182 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1183 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1187 if (GET_MODE_CLASS (mode
) == MODE_INT
)
1188 return gen_lowpart (mode
, temp
);
1190 return convert_to_mode (mode
, temp
, unsignedp
);
1194 /* Look for a wider mode of the same class for which we think we
1195 can open-code the operation. Check for a widening multiply at the
1196 wider mode as well. */
1198 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1199 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1200 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1201 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1203 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1204 || (binoptab
== smul_optab
1205 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1206 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1207 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1208 != CODE_FOR_nothing
)))
1210 rtx xop0
= op0
, xop1
= op1
;
1213 /* For certain integer operations, we need not actually extend
1214 the narrow operands, as long as we will truncate
1215 the results to the same narrowness. */
1217 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1218 || binoptab
== xor_optab
1219 || binoptab
== add_optab
|| binoptab
== sub_optab
1220 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1221 && class == MODE_INT
)
1224 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1226 /* The second operand of a shift must always be extended. */
1227 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1228 no_extend
&& binoptab
!= ashl_optab
);
1230 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1231 unsignedp
, OPTAB_DIRECT
);
1234 if (class != MODE_INT
)
1237 target
= gen_reg_rtx (mode
);
1238 convert_move (target
, temp
, 0);
1242 return gen_lowpart (mode
, temp
);
1245 delete_insns_since (last
);
1249 /* These can be done a word at a time. */
1250 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1251 && class == MODE_INT
1252 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1253 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1259 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1260 won't be accurate, so use a new target. */
1261 if (target
== 0 || target
== op0
|| target
== op1
)
1262 target
= gen_reg_rtx (mode
);
1266 /* Do the actual arithmetic. */
1267 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1269 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1270 rtx x
= expand_binop (word_mode
, binoptab
,
1271 operand_subword_force (op0
, i
, mode
),
1272 operand_subword_force (op1
, i
, mode
),
1273 target_piece
, unsignedp
, next_methods
);
1278 if (target_piece
!= x
)
1279 emit_move_insn (target_piece
, x
);
1282 insns
= get_insns ();
1285 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1287 if (binoptab
->code
!= UNKNOWN
)
1289 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1290 copy_rtx (op0
), copy_rtx (op1
));
1294 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1299 /* Synthesize double word shifts from single word shifts. */
1300 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1301 || binoptab
== ashr_optab
)
1302 && class == MODE_INT
1303 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1304 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1305 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1306 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1307 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1309 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1310 enum machine_mode op1_mode
;
1312 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1313 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1314 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1316 /* Apply the truncation to constant shifts. */
1317 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1318 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1320 if (op1
== CONST0_RTX (op1_mode
))
1323 /* Make sure that this is a combination that expand_doubleword_shift
1324 can handle. See the comments there for details. */
1325 if (double_shift_mask
== 0
1326 || (shift_mask
== BITS_PER_WORD
- 1
1327 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1329 rtx insns
, equiv_value
;
1330 rtx into_target
, outof_target
;
1331 rtx into_input
, outof_input
;
1332 int left_shift
, outof_word
;
1334 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1335 won't be accurate, so use a new target. */
1336 if (target
== 0 || target
== op0
|| target
== op1
)
1337 target
= gen_reg_rtx (mode
);
1341 /* OUTOF_* is the word we are shifting bits away from, and
1342 INTO_* is the word that we are shifting bits towards, thus
1343 they differ depending on the direction of the shift and
1344 WORDS_BIG_ENDIAN. */
1346 left_shift
= binoptab
== ashl_optab
;
1347 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1349 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1350 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1352 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1353 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1355 if (expand_doubleword_shift (op1_mode
, binoptab
,
1356 outof_input
, into_input
, op1
,
1357 outof_target
, into_target
,
1358 unsignedp
, methods
, shift_mask
))
1360 insns
= get_insns ();
1363 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1364 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1371 /* Synthesize double word rotates from single word shifts. */
1372 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1373 && class == MODE_INT
1374 && GET_CODE (op1
) == CONST_INT
1375 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1376 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1377 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1379 rtx insns
, equiv_value
;
1380 rtx into_target
, outof_target
;
1381 rtx into_input
, outof_input
;
1383 int shift_count
, left_shift
, outof_word
;
1385 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1386 won't be accurate, so use a new target. Do this also if target is not
1387 a REG, first because having a register instead may open optimization
1388 opportunities, and second because if target and op0 happen to be MEMs
1389 designating the same location, we would risk clobbering it too early
1390 in the code sequence we generate below. */
1391 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1392 target
= gen_reg_rtx (mode
);
1396 shift_count
= INTVAL (op1
);
1398 /* OUTOF_* is the word we are shifting bits away from, and
1399 INTO_* is the word that we are shifting bits towards, thus
1400 they differ depending on the direction of the shift and
1401 WORDS_BIG_ENDIAN. */
1403 left_shift
= (binoptab
== rotl_optab
);
1404 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1406 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1407 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1409 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1410 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1412 if (shift_count
== BITS_PER_WORD
)
1414 /* This is just a word swap. */
1415 emit_move_insn (outof_target
, into_input
);
1416 emit_move_insn (into_target
, outof_input
);
1421 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1422 rtx first_shift_count
, second_shift_count
;
1423 optab reverse_unsigned_shift
, unsigned_shift
;
1425 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1426 ? lshr_optab
: ashl_optab
);
1428 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1429 ? ashl_optab
: lshr_optab
);
1431 if (shift_count
> BITS_PER_WORD
)
1433 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1434 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1438 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1439 second_shift_count
= GEN_INT (shift_count
);
1442 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1443 outof_input
, first_shift_count
,
1444 NULL_RTX
, unsignedp
, next_methods
);
1445 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1446 into_input
, second_shift_count
,
1447 NULL_RTX
, unsignedp
, next_methods
);
1449 if (into_temp1
!= 0 && into_temp2
!= 0)
1450 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1451 into_target
, unsignedp
, next_methods
);
1455 if (inter
!= 0 && inter
!= into_target
)
1456 emit_move_insn (into_target
, inter
);
1458 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1459 into_input
, first_shift_count
,
1460 NULL_RTX
, unsignedp
, next_methods
);
1461 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1462 outof_input
, second_shift_count
,
1463 NULL_RTX
, unsignedp
, next_methods
);
1465 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1466 inter
= expand_binop (word_mode
, ior_optab
,
1467 outof_temp1
, outof_temp2
,
1468 outof_target
, unsignedp
, next_methods
);
1470 if (inter
!= 0 && inter
!= outof_target
)
1471 emit_move_insn (outof_target
, inter
);
1474 insns
= get_insns ();
1479 if (binoptab
->code
!= UNKNOWN
)
1480 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1484 /* We can't make this a no conflict block if this is a word swap,
1485 because the word swap case fails if the input and output values
1486 are in the same register. */
1487 if (shift_count
!= BITS_PER_WORD
)
1488 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1497 /* These can be done a word at a time by propagating carries. */
1498 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1499 && class == MODE_INT
1500 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1501 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1504 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1505 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1506 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1507 rtx xop0
, xop1
, xtarget
;
      /* We can handle either a 1 or -1 value for the carry.  If
	 STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
	 it is the one easiest to get.  */
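      /* (Clarifying note, not from the original source: an unsigned word
	 addition X = A + B carries exactly when X < A, and a subtraction
	 X = A - B borrows exactly when X > A, both as unsigned comparisons;
	 this is why the carries below can be computed with
	 emit_store_flag_force instead of reading a hardware carry bit.)  */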
1512 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1513 int normalizep
= STORE_FLAG_VALUE
;
1518 /* Prepare the operands. */
1519 xop0
= force_reg (mode
, op0
);
1520 xop1
= force_reg (mode
, op1
);
1522 xtarget
= gen_reg_rtx (mode
);
1524 if (target
== 0 || !REG_P (target
))
1527 /* Indicate for flow that the entire target reg is being set. */
1529 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1531 /* Do the actual arithmetic. */
1532 for (i
= 0; i
< nwords
; i
++)
1534 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1535 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1536 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1537 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1540 /* Main add/subtract of the input operands. */
1541 x
= expand_binop (word_mode
, binoptab
,
1542 op0_piece
, op1_piece
,
1543 target_piece
, unsignedp
, next_methods
);
1549 /* Store carry from main add/subtract. */
1550 carry_out
= gen_reg_rtx (word_mode
);
1551 carry_out
= emit_store_flag_force (carry_out
,
1552 (binoptab
== add_optab
1555 word_mode
, 1, normalizep
);
1562 /* Add/subtract previous carry to main result. */
1563 newx
= expand_binop (word_mode
,
1564 normalizep
== 1 ? binoptab
: otheroptab
,
1566 NULL_RTX
, 1, next_methods
);
1570 /* Get out carry from adding/subtracting carry in. */
1571 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1572 carry_tmp
= emit_store_flag_force (carry_tmp
,
1573 (binoptab
== add_optab
1576 word_mode
, 1, normalizep
);
1578 /* Logical-ior the two poss. carry together. */
1579 carry_out
= expand_binop (word_mode
, ior_optab
,
1580 carry_out
, carry_tmp
,
1581 carry_out
, 0, next_methods
);
1585 emit_move_insn (target_piece
, newx
);
1589 if (x
!= target_piece
)
1590 emit_move_insn (target_piece
, x
);
1593 carry_in
= carry_out
;
1596 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1598 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1599 || ! rtx_equal_p (target
, xtarget
))
1601 rtx temp
= emit_move_insn (target
, xtarget
);
1603 set_unique_reg_note (temp
,
1605 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1616 delete_insns_since (last
);
1619 /* Attempt to synthesize double word multiplies using a sequence of word
1620 mode multiplications. We first attempt to generate a sequence using a
1621 more efficient unsigned widening multiply, and if that fails we then
1622 try using a signed widening multiply. */
1624 if (binoptab
== smul_optab
1625 && class == MODE_INT
1626 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1627 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1628 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1630 rtx product
= NULL_RTX
;
1632 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1633 != CODE_FOR_nothing
)
1635 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1638 delete_insns_since (last
);
1641 if (product
== NULL_RTX
1642 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1643 != CODE_FOR_nothing
)
1645 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1648 delete_insns_since (last
);
1651 if (product
!= NULL_RTX
)
1653 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1655 temp
= emit_move_insn (target
? target
: product
, product
);
1656 set_unique_reg_note (temp
,
1658 gen_rtx_fmt_ee (MULT
, mode
,
1666 /* It can't be open-coded in this mode.
1667 Use a library call if one is available and caller says that's ok. */
1669 if (binoptab
->handlers
[(int) mode
].libfunc
1670 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1674 enum machine_mode op1_mode
= mode
;
1681 op1_mode
= word_mode
;
1682 /* Specify unsigned here,
1683 since negative shift counts are meaningless. */
1684 op1x
= convert_to_mode (word_mode
, op1
, 1);
1687 if (GET_MODE (op0
) != VOIDmode
1688 && GET_MODE (op0
) != mode
)
1689 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1691 /* Pass 1 for NO_QUEUE so we don't lose any increments
1692 if the libcall is cse'd or moved. */
1693 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1694 NULL_RTX
, LCT_CONST
, mode
, 2,
1695 op0
, mode
, op1x
, op1_mode
);
1697 insns
= get_insns ();
1700 target
= gen_reg_rtx (mode
);
1701 emit_libcall_block (insns
, target
, value
,
1702 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1707 delete_insns_since (last
);
1709 /* It can't be done in this mode. Can we do it in a wider mode? */
1711 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1712 || methods
== OPTAB_MUST_WIDEN
))
1714 /* Caller says, don't even try. */
1715 delete_insns_since (entry_last
);
1719 /* Compute the value of METHODS to pass to recursive calls.
1720 Don't allow widening to be tried recursively. */
1722 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1724 /* Look for a wider mode of the same class for which it appears we can do
1727 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1729 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1730 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1732 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1733 != CODE_FOR_nothing
)
1734 || (methods
== OPTAB_LIB
1735 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1737 rtx xop0
= op0
, xop1
= op1
;
1740 /* For certain integer operations, we need not actually extend
1741 the narrow operands, as long as we will truncate
1742 the results to the same narrowness. */
1744 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1745 || binoptab
== xor_optab
1746 || binoptab
== add_optab
|| binoptab
== sub_optab
1747 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1748 && class == MODE_INT
)
1751 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1752 unsignedp
, no_extend
);
1754 /* The second operand of a shift must always be extended. */
1755 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1756 no_extend
&& binoptab
!= ashl_optab
);
1758 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1759 unsignedp
, methods
);
1762 if (class != MODE_INT
)
1765 target
= gen_reg_rtx (mode
);
1766 convert_move (target
, temp
, 0);
1770 return gen_lowpart (mode
, temp
);
1773 delete_insns_since (last
);
1778 delete_insns_since (entry_last
);
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
1790 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1791 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1792 enum optab_methods methods
)
1795 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1796 struct optab wide_soptab
;
1798 /* Do it without widening, if possible. */
1799 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1800 unsignedp
, OPTAB_DIRECT
);
1801 if (temp
|| methods
== OPTAB_DIRECT
)
1804 /* Try widening to a signed int. Make a fake signed optab that
1805 hides any signed insn for direct use. */
1806 wide_soptab
= *soptab
;
1807 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1808 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1810 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1811 unsignedp
, OPTAB_WIDEN
);
1813 /* For unsigned operands, try widening to an unsigned int. */
1814 if (temp
== 0 && unsignedp
)
1815 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1816 unsignedp
, OPTAB_WIDEN
);
1817 if (temp
|| methods
== OPTAB_WIDEN
)
1820 /* Use the right width lib call if that exists. */
1821 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1822 if (temp
|| methods
== OPTAB_LIB
)
1825 /* Must widen and use a lib call, use either signed or unsigned. */
1826 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1827 unsignedp
, methods
);
1831 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1832 unsignedp
, methods
);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
1848 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1851 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1852 enum mode_class
class;
1853 enum machine_mode wider_mode
;
1854 rtx entry_last
= get_last_insn ();
1857 class = GET_MODE_CLASS (mode
);
1860 targ0
= gen_reg_rtx (mode
);
1862 targ1
= gen_reg_rtx (mode
);
1864 /* Record where to go back to if we fail. */
1865 last
= get_last_insn ();
1867 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1869 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1870 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1874 if (GET_MODE (xop0
) != VOIDmode
1875 && GET_MODE (xop0
) != mode0
)
1876 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1878 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1879 if (!insn_data
[icode
].operand
[2].predicate (xop0
, mode0
))
1880 xop0
= copy_to_mode_reg (mode0
, xop0
);
1882 /* We could handle this, but we should always be called with a pseudo
1883 for our targets and all insns should take them as outputs. */
1884 gcc_assert (insn_data
[icode
].operand
[0].predicate (targ0
, mode
));
1885 gcc_assert (insn_data
[icode
].operand
[1].predicate (targ1
, mode
));
1887 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
1894 delete_insns_since (last
);
1897 /* It can't be done in this mode. Can we do it in a wider mode? */
1899 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1901 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1902 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1904 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1905 != CODE_FOR_nothing
)
1907 rtx t0
= gen_reg_rtx (wider_mode
);
1908 rtx t1
= gen_reg_rtx (wider_mode
);
1909 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1911 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1913 convert_move (targ0
, t0
, unsignedp
);
1914 convert_move (targ1
, t1
, unsignedp
);
1918 delete_insns_since (last
);
1923 delete_insns_since (entry_last
);
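
/* A two-output unary pattern of this shape is something like a
   sincos-style insn, where one operation produces both results;
   a caller that wants only one of them passes zero for the other,
   and the dummy pseudo created above is simply never used again.  */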
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
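
/* Concretely, for SImode operands LIBVAL_MODE is the DImode-sized
   integer mode: the library function is assumed to return both values
   packed into that double-width value, and the simplify_gen_subreg
   call above keeps the piece at byte offset 0 when TARG0 is wanted,
   or the piece at byte offset GET_MODE_SIZE (SImode) when TARG1 is.  */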
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
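
/* For example, a QImode clz expanded through an SImode clz pattern:
   for a nonzero operand, (clz:SI (zero_extend:SI x)) counts 24 extra
   leading zero bits coming from the extension, so subtracting
   GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (QImode) == 24 yields
   the QImode count.  */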
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
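
/* The identity being used: parity is just the low bit of the
   population count; e.g. x == 0b1011 has popcount 3, and (3 & 1) == 1
   is its parity.  Any wider mode works because the zero-extension
   added by widen_operand contributes no set bits.  */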
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
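
/* This situation arises, for instance, when VAL is itself a SUBREG,
   since a SUBREG of a SUBREG is not a valid rtx and lowpart_subreg
   then returns NULL; copying VAL into a fresh pseudo gives a plain
   REG for which the lowpart SUBREG can always be formed.  */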
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_const (lo, hi, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_const (lo, hi, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
			   gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
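
/* For IEEE single precision viewed as a 32-bit integer the sign bit is
   bit 31, so the code above reduces ABS to (and:SI x 0x7fffffff) and
   NEG to (xor:SI x 0x80000000), leaving every other bit of the
   representation untouched.  */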
2314 /* Generate code to perform an operation specified by UNOPTAB
2315 on operand OP0, with result having machine-mode MODE.
2317 UNSIGNEDP is for the case where we have to widen the operands
2318 to perform the operation. It says to use zero-extension.
2320 If TARGET is nonzero, the value
2321 is generated there, if it is convenient to do so.
2322 In all cases an rtx is returned for the locus of the value;
2323 this may or may not be TARGET. */
2326 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2329 enum mode_class
class;
2330 enum machine_mode wider_mode
;
2332 rtx last
= get_last_insn ();
2335 class = GET_MODE_CLASS (mode
);
2337 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2339 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2340 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2346 temp
= gen_reg_rtx (mode
);
2348 if (GET_MODE (xop0
) != VOIDmode
2349 && GET_MODE (xop0
) != mode0
)
2350 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2352 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2354 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2355 xop0
= copy_to_mode_reg (mode0
, xop0
);
2357 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
2358 temp
= gen_reg_rtx (mode
);
2360 pat
= GEN_FCN (icode
) (temp
, xop0
);
2363 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2364 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2366 delete_insns_since (last
);
2367 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2375 delete_insns_since (last
);
2378 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2380 /* Widening clz needs special treatment. */
2381 if (unoptab
== clz_optab
)
2383 temp
= widen_clz (mode
, op0
, target
);
2390 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2391 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2392 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2394 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2398 /* For certain operations, we need not actually extend
2399 the narrow operand, as long as we will truncate the
2400 results to the same narrowness. */
2402 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2403 (unoptab
== neg_optab
2404 || unoptab
== one_cmpl_optab
)
2405 && class == MODE_INT
);
2407 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2412 if (class != MODE_INT
)
2415 target
= gen_reg_rtx (mode
);
2416 convert_move (target
, temp
, 0);
2420 return gen_lowpart (mode
, temp
);
2423 delete_insns_since (last
);
2427 /* These can be done a word at a time. */
2428 if (unoptab
== one_cmpl_optab
2429 && class == MODE_INT
2430 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2431 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2436 if (target
== 0 || target
== op0
)
2437 target
= gen_reg_rtx (mode
);
2441 /* Do the actual arithmetic. */
2442 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2444 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2445 rtx x
= expand_unop (word_mode
, unoptab
,
2446 operand_subword_force (op0
, i
, mode
),
2447 target_piece
, unsignedp
);
2449 if (target_piece
!= x
)
2450 emit_move_insn (target_piece
, x
);
2453 insns
= get_insns ();
2456 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2457 gen_rtx_fmt_e (unoptab
->code
, mode
,
2462 if (unoptab
->code
== NEG
)
2464 /* Try negating floating point values by flipping the sign bit. */
2465 if (class == MODE_FLOAT
)
2467 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2472 /* If there is no negation pattern, and we have no negative zero,
2473 try subtracting from zero. */
2474 if (!HONOR_SIGNED_ZEROS (mode
))
2476 temp
= expand_binop (mode
, (unoptab
== negv_optab
2477 ? subv_optab
: sub_optab
),
2478 CONST0_RTX (mode
), op0
, target
,
2479 unsignedp
, OPTAB_DIRECT
);
2485 /* Try calculating parity (x) as popcount (x) % 2. */
2486 if (unoptab
== parity_optab
)
2488 temp
= expand_parity (mode
, op0
, target
);
2494 /* Now try a library call in this mode. */
2495 if (unoptab
->handlers
[(int) mode
].libfunc
)
2499 enum machine_mode outmode
= mode
;
2501 /* All of these functions return small values. Thus we choose to
2502 have them return something that isn't a double-word. */
2503 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2504 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2506 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2510 /* Pass 1 for NO_QUEUE so we don't lose any increments
2511 if the libcall is cse'd or moved. */
2512 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2513 NULL_RTX
, LCT_CONST
, outmode
,
2515 insns
= get_insns ();
2518 target
= gen_reg_rtx (outmode
);
2519 emit_libcall_block (insns
, target
, value
,
2520 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2525 /* It can't be done in this mode. Can we do it in a wider mode? */
2527 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2529 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2530 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2532 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2533 != CODE_FOR_nothing
)
2534 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2538 /* For certain operations, we need not actually extend
2539 the narrow operand, as long as we will truncate the
2540 results to the same narrowness. */
2542 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2543 (unoptab
== neg_optab
2544 || unoptab
== one_cmpl_optab
)
2545 && class == MODE_INT
);
2547 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2550 /* If we are generating clz using wider mode, adjust the
2552 if (unoptab
== clz_optab
&& temp
!= 0)
2553 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2554 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2555 - GET_MODE_BITSIZE (mode
)),
2556 target
, true, OPTAB_DIRECT
);
2560 if (class != MODE_INT
)
2563 target
= gen_reg_rtx (mode
);
2564 convert_move (target
, temp
, 0);
2568 return gen_lowpart (mode
, temp
);
2571 delete_insns_since (last
);
2576 /* One final attempt at implementing negation via subtraction,
2577 this time allowing widening of the operand. */
2578 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2581 temp
= expand_binop (mode
,
2582 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2583 CONST0_RTX (mode
), op0
,
2584 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2592 /* Emit code to compute the absolute value of OP0, with result to
2593 TARGET if convenient. (TARGET may be 0.) The return value says
2594 where the result actually is to be found.
2596 MODE is the mode of the operand; the mode of the result is
2597 different but can be deduced from MODE.
2602 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2603 int result_unsignedp
)
2608 result_unsignedp
= 1;
2610 /* First try to do it with a special abs instruction. */
2611 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2616 /* For floating point modes, try clearing the sign bit. */
2617 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2619 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2624 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2625 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2626 && !HONOR_SIGNED_ZEROS (mode
))
2628 rtx last
= get_last_insn ();
2630 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2632 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2638 delete_insns_since (last
);
2641 /* If this machine has expensive jumps, we can do integer absolute
2642 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2643 where W is the width of MODE. */
2645 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2647 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2648 size_int (GET_MODE_BITSIZE (mode
) - 1),
2651 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2654 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2655 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2665 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2666 int result_unsignedp
, int safe
)
2671 result_unsignedp
= 1;
2673 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2677 /* If that does not win, use conditional jump and negate. */
2679 /* It is safe to use the target if it is the same
2680 as the source if this is also a pseudo register */
2681 if (op0
== target
&& REG_P (op0
)
2682 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2685 op1
= gen_label_rtx ();
2686 if (target
== 0 || ! safe
2687 || GET_MODE (target
) != mode
2688 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2690 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2691 target
= gen_reg_rtx (mode
);
2693 emit_move_insn (target
, op0
);
2696 /* If this mode is an integer too wide to compare properly,
2697 compare word by word. Rely on CSE to optimize constant cases. */
2698 if (GET_MODE_CLASS (mode
) == MODE_INT
2699 && ! can_compare_p (GE
, mode
, ccp_jump
))
2700 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2703 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2704 NULL_RTX
, NULL_RTX
, op1
);
2706 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2709 emit_move_insn (target
, op0
);
2715 /* A subroutine of expand_copysign, perform the copysign operation using the
2716 abs and neg primitives advertised to exist on the target. The assumption
2717 is that we have a split register file, and leaving op0 in fp registers,
2718 and not playing with subregs so much, will help the register allocator. */
2721 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2722 int bitpos
, bool op0_is_abs
)
2724 enum machine_mode imode
;
2725 HOST_WIDE_INT hi
, lo
;
2734 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2741 if (target
== NULL_RTX
)
2742 target
= copy_to_reg (op0
);
2744 emit_move_insn (target
, op0
);
2747 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2749 imode
= int_mode_for_mode (mode
);
2750 if (imode
== BLKmode
)
2752 op1
= gen_lowpart (imode
, op1
);
2757 if (FLOAT_WORDS_BIG_ENDIAN
)
2758 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2760 word
= bitpos
/ BITS_PER_WORD
;
2761 bitpos
= bitpos
% BITS_PER_WORD
;
2762 op1
= operand_subword_force (op1
, word
, mode
);
2765 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2768 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2772 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2776 op1
= expand_binop (imode
, and_optab
, op1
,
2777 immed_double_const (lo
, hi
, imode
),
2778 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2780 label
= gen_label_rtx ();
2781 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2783 if (GET_CODE (op0
) == CONST_DOUBLE
)
2784 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2786 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2788 emit_move_insn (target
, op0
);
2796 /* A subroutine of expand_copysign, perform the entire copysign operation
2797 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2798 is true if op0 is known to have its sign bit clear. */
2801 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2802 int bitpos
, bool op0_is_abs
)
2804 enum machine_mode imode
;
2805 HOST_WIDE_INT hi
, lo
;
2806 int word
, nwords
, i
;
2809 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2811 imode
= int_mode_for_mode (mode
);
2812 if (imode
== BLKmode
)
2821 if (FLOAT_WORDS_BIG_ENDIAN
)
2822 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2824 word
= bitpos
/ BITS_PER_WORD
;
2825 bitpos
= bitpos
% BITS_PER_WORD
;
2826 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2829 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2832 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2836 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2840 if (target
== 0 || target
== op0
|| target
== op1
)
2841 target
= gen_reg_rtx (mode
);
2847 for (i
= 0; i
< nwords
; ++i
)
2849 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2850 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2855 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2856 immed_double_const (~lo
, ~hi
, imode
),
2857 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2859 op1
= expand_binop (imode
, and_optab
,
2860 operand_subword_force (op1
, i
, mode
),
2861 immed_double_const (lo
, hi
, imode
),
2862 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2864 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2865 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2866 if (temp
!= targ_piece
)
2867 emit_move_insn (targ_piece
, temp
);
2870 emit_move_insn (targ_piece
, op0_piece
);
2873 insns
= get_insns ();
2876 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2880 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2881 immed_double_const (lo
, hi
, imode
),
2882 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2884 op0
= gen_lowpart (imode
, op0
);
2886 op0
= expand_binop (imode
, and_optab
, op0
,
2887 immed_double_const (~lo
, ~hi
, imode
),
2888 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2890 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2891 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2892 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2898 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2899 scalar floating point mode. Return NULL if we do not know how to
2900 expand the operation inline. */
2903 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2905 enum machine_mode mode
= GET_MODE (op0
);
2906 const struct real_format
*fmt
;
2910 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2911 gcc_assert (GET_MODE (op1
) == mode
);
2913 /* First try to do it with a special instruction. */
2914 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2915 target
, 0, OPTAB_DIRECT
);
2919 fmt
= REAL_MODE_FORMAT (mode
);
2920 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2924 if (GET_CODE (op0
) == CONST_DOUBLE
)
2926 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2927 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2931 if (fmt
->signbit_ro
>= 0
2932 && (GET_CODE (op0
) == CONST_DOUBLE
2933 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2934 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)))
2936 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2937 fmt
->signbit_ro
, op0_is_abs
);
2942 if (fmt
->signbit_rw
< 0)
2944 return expand_copysign_bit (mode
, op0
, op1
, target
,
2945 fmt
->signbit_rw
, op0_is_abs
);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block.  Set P->must_stay
   if the currently examined clobber / store has to stay in the list of
   insns that constitute the actual no_conflict block.  */
static void
no_conflict_move_test (rtx dest, rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_mentioned_p (dest, PATTERN (p->first))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn))))
    p->must_stay = true;
}
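
/* Put differently: an insn may be hoisted out of the no_conflict block
   only if it neither stores into any word of TARGET nor produces a
   value consumed, directly or via an intervening set, by an insn that
   has already been determined to stay in the block.  */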
3015 /* Emit code to perform a series of operations on a multi-word quantity, one
3018 Such a block is preceded by a CLOBBER of the output, consists of multiple
3019 insns, each setting one word of the output, and followed by a SET copying
3020 the output to itself.
3022 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3023 note indicating that it doesn't conflict with the (also multi-word)
3024 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3027 INSNS is a block of code generated to perform the operation, not including
3028 the CLOBBER and final copy. All insns that compute intermediate values
3029 are first emitted, followed by the block as described above.
3031 TARGET, OP0, and OP1 are the output and inputs of the operations,
3032 respectively. OP1 may be zero for a unary operation.
3034 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3037 If TARGET is not a register, INSNS is simply emitted with no special
3038 processing. Likewise if anything in INSNS is not an INSN or if
3039 there is a libcall block inside INSNS.
3041 The final insn emitted is returned. */
3044 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3046 rtx prev
, next
, first
, last
, insn
;
3048 if (!REG_P (target
) || reload_in_progress
)
3049 return emit_insn (insns
);
3051 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3052 if (!NONJUMP_INSN_P (insn
)
3053 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3054 return emit_insn (insns
);
3056 /* First emit all insns that do not store into words of the output and remove
3057 these from the list. */
3058 for (insn
= insns
; insn
; insn
= next
)
3061 struct no_conflict_data data
;
3063 next
= NEXT_INSN (insn
);
3065 /* Some ports (cris) create a libcall regions at their own. We must
3066 avoid any potential nesting of LIBCALLs. */
3067 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3068 remove_note (insn
, note
);
3069 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3070 remove_note (insn
, note
);
3072 data
.target
= target
;
3076 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3077 if (! data
.must_stay
)
3079 if (PREV_INSN (insn
))
3080 NEXT_INSN (PREV_INSN (insn
)) = next
;
3085 PREV_INSN (next
) = PREV_INSN (insn
);
3091 prev
= get_last_insn ();
3093 /* Now write the CLOBBER of the output, followed by the setting of each
3094 of the words, followed by the final copy. */
3095 if (target
!= op0
&& target
!= op1
)
3096 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3098 for (insn
= insns
; insn
; insn
= next
)
3100 next
= NEXT_INSN (insn
);
3103 if (op1
&& REG_P (op1
))
3104 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3107 if (op0
&& REG_P (op0
))
3108 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3112 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3113 != CODE_FOR_nothing
)
3115 last
= emit_move_insn (target
, target
);
3117 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3121 last
= get_last_insn ();
3123 /* Remove any existing REG_EQUAL note from "last", or else it will
3124 be mistaken for a note referring to the full contents of the
3125 alleged libcall value when found together with the REG_RETVAL
3126 note added below. An existing note can come from an insn
3127 expansion at "last". */
3128 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3132 first
= get_insns ();
3134 first
= NEXT_INSN (prev
);
3136 /* Encapsulate the block so it gets manipulated as a unit. */
3137 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3139 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3144 /* Emit code to make a call to a constant function or a library call.
3146 INSNS is a list containing all insns emitted in the call.
3147 These insns leave the result in RESULT. Our block is to copy RESULT
3148 to TARGET, which is logically equivalent to EQUIV.
3150 We first emit any insns that set a pseudo on the assumption that these are
3151 loading constants into registers; doing so allows them to be safely cse'ed
3152 between blocks. Then we emit all the other insns in the block, followed by
3153 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3154 note with an operand of EQUIV.
3156 Moving assignments to pseudos outside of the block is done to improve
3157 the generated code, but is not required to generate correct code,
3158 hence being unable to move an assignment is not grounds for not making
3159 a libcall block. There are two reasons why it is safe to leave these
3160 insns inside the block: First, we know that these pseudos cannot be
3161 used in generated RTL outside the block since they are created for
3162 temporary purposes within the block. Second, CSE will not record the
3163 values of anything set inside a libcall block, so we know they must
3164 be dead at the end of the block.
3166 Except for the first group of insns (the ones setting pseudos), the
3167 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3170 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3172 rtx final_dest
= target
;
3173 rtx prev
, next
, first
, last
, insn
;
3175 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3176 into a MEM later. Protect the libcall block from this change. */
3177 if (! REG_P (target
) || REG_USERVAR_P (target
))
3178 target
= gen_reg_rtx (GET_MODE (target
));
3180 /* If we're using non-call exceptions, a libcall corresponding to an
3181 operation that may trap may also trap. */
3182 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3184 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3187 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3189 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3190 remove_note (insn
, note
);
3194 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3195 reg note to indicate that this call cannot throw or execute a nonlocal
3196 goto (unless there is already a REG_EH_REGION note, in which case
3198 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3201 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3204 XEXP (note
, 0) = constm1_rtx
;
3206 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3210 /* First emit all insns that set pseudos. Remove them from the list as
3211 we go. Avoid insns that set pseudos which were referenced in previous
3212 insns. These can be generated by move_by_pieces, for example,
3213 to update an address. Similarly, avoid insns that reference things
3214 set in previous insns. */
3216 for (insn
= insns
; insn
; insn
= next
)
3218 rtx set
= single_set (insn
);
3221 /* Some ports (cris) create a libcall regions at their own. We must
3222 avoid any potential nesting of LIBCALLs. */
3223 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3224 remove_note (insn
, note
);
3225 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3226 remove_note (insn
, note
);
3228 next
= NEXT_INSN (insn
);
3230 if (set
!= 0 && REG_P (SET_DEST (set
))
3231 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3233 || ((! INSN_P(insns
)
3234 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3235 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3236 && ! modified_in_p (SET_SRC (set
), insns
)
3237 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3239 if (PREV_INSN (insn
))
3240 NEXT_INSN (PREV_INSN (insn
)) = next
;
3245 PREV_INSN (next
) = PREV_INSN (insn
);
3250 /* Some ports use a loop to copy large arguments onto the stack.
3251 Don't move anything outside such a loop. */
3256 prev
= get_last_insn ();
3258 /* Write the remaining insns followed by the final copy. */
3260 for (insn
= insns
; insn
; insn
= next
)
3262 next
= NEXT_INSN (insn
);
3267 last
= emit_move_insn (target
, result
);
3268 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3269 != CODE_FOR_nothing
)
3270 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3273 /* Remove any existing REG_EQUAL note from "last", or else it will
3274 be mistaken for a note referring to the full contents of the
3275 libcall value when found together with the REG_RETVAL note added
3276 below. An existing note can come from an insn expansion at
3278 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3281 if (final_dest
!= target
)
3282 emit_move_insn (final_dest
, target
);
3285 first
= get_insns ();
3287 first
= NEXT_INSN (prev
);
3289 /* Encapsulate the block so it gets manipulated as a unit. */
3290 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3292 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3293 when the encapsulated region would not be in one basic block,
3294 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3296 bool attach_libcall_retval_notes
= true;
3297 next
= NEXT_INSN (last
);
3298 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3299 if (control_flow_insn_p (insn
))
3301 attach_libcall_retval_notes
= false;
3305 if (attach_libcall_retval_notes
)
3307 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3309 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
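
/* The widening loop mirrors what emit_cmp_and_jump_insn_1 does when the
   comparison is finally emitted: if, say, only SImode compare patterns
   exist, a QImode comparison is still reported as possible, because the
   operands can be extended to SImode first.  */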
3355 /* This function is called when we are going to emit a compare instruction that
3356 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3358 *PMODE is the mode of the inputs (in case they are const_int).
3359 *PUNSIGNEDP nonzero says that the operands are unsigned;
3360 this matters if they need to be widened.
3362 If they have mode BLKmode, then SIZE specifies the size of both operands.
3364 This function performs all the setup necessary so that the caller only has
3365 to emit a single comparison insn. This setup can involve doing a BLKmode
3366 comparison or emitting a library call to perform the comparison if no insn
3367 is available to handle it.
3368 The values which are passed in through pointers can be modified; the caller
3369 should perform the comparison on the modified values. Constant
3370 comparisons must have already been folded. */
3373 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3374 enum machine_mode
*pmode
, int *punsignedp
,
3375 enum can_compare_purpose purpose
)
3377 enum machine_mode mode
= *pmode
;
3378 rtx x
= *px
, y
= *py
;
3379 int unsignedp
= *punsignedp
;
3380 enum mode_class
class;
3382 class = GET_MODE_CLASS (mode
);
3384 /* If we are inside an appropriately-short loop and we are optimizing,
3385 force expensive constants into a register. */
3386 if (CONSTANT_P (x
) && optimize
3387 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3388 x
= force_reg (mode
, x
);
3390 if (CONSTANT_P (y
) && optimize
3391 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3392 y
= force_reg (mode
, y
);
3395 /* Make sure if we have a canonical comparison. The RTL
3396 documentation states that canonical comparisons are required only
3397 for targets which have cc0. */
3398 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3401 /* Don't let both operands fail to indicate the mode. */
3402 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3403 x
= force_reg (mode
, x
);
3405 /* Handle all BLKmode compares. */
3407 if (mode
== BLKmode
)
3409 enum machine_mode cmp_mode
, result_mode
;
3410 enum insn_code cmp_code
;
3415 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3419 /* Try to use a memory block compare insn - either cmpstr
3420 or cmpmem will do. */
3421 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3422 cmp_mode
!= VOIDmode
;
3423 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3425 cmp_code
= cmpmem_optab
[cmp_mode
];
3426 if (cmp_code
== CODE_FOR_nothing
)
3427 cmp_code
= cmpstr_optab
[cmp_mode
];
3428 if (cmp_code
== CODE_FOR_nothing
)
3431 /* Must make sure the size fits the insn's mode. */
3432 if ((GET_CODE (size
) == CONST_INT
3433 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3434 || (GET_MODE_BITSIZE (GET_MODE (size
))
3435 > GET_MODE_BITSIZE (cmp_mode
)))
3438 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3439 result
= gen_reg_rtx (result_mode
);
3440 size
= convert_to_mode (cmp_mode
, size
, 1);
3441 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3445 *pmode
= result_mode
;
3449 /* Otherwise call a library function, memcmp. */
3450 libfunc
= memcmp_libfunc
;
3451 length_type
= sizetype
;
3452 result_mode
= TYPE_MODE (integer_type_node
);
3453 cmp_mode
= TYPE_MODE (length_type
);
3454 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3455 TYPE_UNSIGNED (length_type
));
3457 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3464 *pmode
= result_mode
;
3468 /* Don't allow operands to the compare to trap, as that can put the
3469 compare and branch in different basic blocks. */
3470 if (flag_non_call_exceptions
)
3473 x
= force_reg (mode
, x
);
3475 y
= force_reg (mode
, y
);
3480 if (can_compare_p (*pcomparison
, mode
, purpose
))
3483 /* Handle a lib call just for the mode we are using. */
3485 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3487 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3490 /* If we want unsigned, and this mode has a distinct unsigned
3491 comparison routine, use that. */
3492 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3493 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3495 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3496 word_mode
, 2, x
, mode
, y
, mode
);
3500 if (TARGET_LIB_INT_CMP_BIASED
)
3501 /* Integer comparison returns a result that must be compared
3502 against 1, so that even if we do an unsigned compare
3503 afterward, there is still a value that can represent the
3504 result "less than". */
3514 gcc_assert (class == MODE_FLOAT
);
3515 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
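
/* For example, when a QImode operand is fed to an SImode cbranch
   pattern, prepare_operand first sign- or zero-extends it to SImode
   and then, if the pattern's predicate still rejects the resulting rtx
   (say it is a MEM and the predicate wants a register), copies it
   into a fresh SImode pseudo.  */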
3541 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3542 we can do the comparison.
3543 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3544 be NULL_RTX which indicates that only a comparison is to be generated. */
3547 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3548 enum rtx_code comparison
, int unsignedp
, rtx label
)
3550 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3551 enum mode_class
class = GET_MODE_CLASS (mode
);
3552 enum machine_mode wider_mode
= mode
;
3554 /* Try combined insns first. */
3557 enum insn_code icode
;
3558 PUT_MODE (test
, wider_mode
);
3562 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3564 if (icode
!= CODE_FOR_nothing
3565 && insn_data
[icode
].operand
[0].predicate (test
, wider_mode
))
3567 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3568 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3569 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3574 /* Handle some compares against zero. */
3575 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3576 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3578 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3579 emit_insn (GEN_FCN (icode
) (x
));
3581 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3585 /* Handle compares for which there is a directly suitable insn. */
3587 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3588 if (icode
!= CODE_FOR_nothing
)
3590 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3591 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3592 emit_insn (GEN_FCN (icode
) (x
, y
));
3594 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3598 if (class != MODE_INT
&& class != MODE_FLOAT
3599 && class != MODE_COMPLEX_FLOAT
)
3602 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3604 while (wider_mode
!= VOIDmode
);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
	 is out of sync.  */
      gcc_assert (label);

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}

/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
3667 /* Emit a library call comparison between floating point X and Y.
3668 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3671 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3672 enum machine_mode
*pmode
, int *punsignedp
)
3674 enum rtx_code comparison
= *pcomparison
;
3675 enum rtx_code swapped
= swap_condition (comparison
);
3676 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3679 enum machine_mode orig_mode
= GET_MODE (x
);
3680 enum machine_mode mode
;
3681 rtx value
, target
, insns
, equiv
;
3683 bool reversed_p
= false;
3685 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3687 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3690 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3693 tmp
= x
; x
= y
; y
= tmp
;
3694 comparison
= swapped
;
3698 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3699 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3701 comparison
= reversed
;
3707 gcc_assert (mode
!= VOIDmode
);
3709 if (mode
!= orig_mode
)
3711 x
= convert_to_mode (mode
, x
, 0);
3712 y
= convert_to_mode (mode
, y
, 0);
3715 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3716 the RTL. The allows the RTL optimizers to delete the libcall if the
3717 condition can be determined at compile-time. */
3718 if (comparison
== UNORDERED
)
3720 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3721 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3722 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3723 temp
, const_true_rtx
, equiv
);
3727 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3728 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3730 rtx true_rtx
, false_rtx
;
3735 true_rtx
= const0_rtx
;
3736 false_rtx
= const_true_rtx
;
3740 true_rtx
= const_true_rtx
;
3741 false_rtx
= const0_rtx
;
3745 true_rtx
= const1_rtx
;
3746 false_rtx
= const0_rtx
;
3750 true_rtx
= const0_rtx
;
3751 false_rtx
= constm1_rtx
;
3755 true_rtx
= constm1_rtx
;
3756 false_rtx
= const0_rtx
;
3760 true_rtx
= const0_rtx
;
3761 false_rtx
= const1_rtx
;
3767 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3768 equiv
, true_rtx
, false_rtx
);
3773 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3774 word_mode
, 2, x
, mode
, y
, mode
);
3775 insns
= get_insns ();
3778 target
= gen_reg_rtx (word_mode
);
3779 emit_libcall_block (insns
, target
, value
, equiv
);
3781 if (comparison
== UNORDERED
3782 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3783 comparison
= reversed_p
? EQ
: NE
;
3788 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
3805 #ifdef HAVE_conditional_move
3807 /* Emit a conditional move instruction if the machine supports one for that
3808 condition and machine mode.
3810 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3811 the mode to use should they be constants. If it is VOIDmode, they cannot
3814 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3815 should be stored there. MODE is the mode to use should they be constants.
3816 If it is VOIDmode, they cannot both be constants.
3818 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3819 is not supported. */
3822 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3823 enum machine_mode cmode
, rtx op2
, rtx op3
,
3824 enum machine_mode mode
, int unsignedp
)
3826 rtx tem
, subtarget
, comparison
, insn
;
3827 enum insn_code icode
;
3828 enum rtx_code reversed
;
3830 /* If one operand is constant, make it the second one. Only do this
3831 if the other operand is not constant as well. */
3833 if (swap_commutative_operands_p (op0
, op1
))
3838 code
= swap_condition (code
);
3841 /* get_condition will prefer to generate LT and GT even if the old
3842 comparison was against zero, so undo that canonicalization here since
3843 comparisons against zero are cheaper. */
3844 if (code
== LT
&& op1
== const1_rtx
)
3845 code
= LE
, op1
= const0_rtx
;
3846 else if (code
== GT
&& op1
== constm1_rtx
)
3847 code
= GE
, op1
= const0_rtx
;
3849 if (cmode
== VOIDmode
)
3850 cmode
= GET_MODE (op0
);
3852 if (swap_commutative_operands_p (op2
, op3
)
3853 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3862 if (mode
== VOIDmode
)
3863 mode
= GET_MODE (op2
);
3865 icode
= movcc_gen_code
[mode
];
3867 if (icode
== CODE_FOR_nothing
)
3871 target
= gen_reg_rtx (mode
);
3875 /* If the insn doesn't accept these operands, put them in pseudos. */
3877 if (!insn_data
[icode
].operand
[0].predicate
3878 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3879 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3881 if (!insn_data
[icode
].operand
[2].predicate
3882 (op2
, insn_data
[icode
].operand
[2].mode
))
3883 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3885 if (!insn_data
[icode
].operand
[3].predicate
3886 (op3
, insn_data
[icode
].operand
[3].mode
))
3887 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3889 /* Everything should now be in the suitable form, so emit the compare insn
3890 and then the conditional move. */
3893 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3895 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3896 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3897 return NULL and let the caller figure out how best to deal with this
3899 if (GET_CODE (comparison
) != code
)
3902 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3904 /* If that failed, then give up. */
3910 if (subtarget
!= target
)
3911 convert_move (target
, subtarget
, 0);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return NULL_RTX;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
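/* A minimal usage sketch (illustrative only, not part of GCC): expand
   "x = (a < b) ? c : c + 1" with a conditional-add pattern.  All operands
   are assumed to be SImode values supplied by the caller; the result is
   NULL_RTX when the target has no addcc pattern for SImode.  */
#if 0
static rtx
example_cond_add (rtx x, rtx a, rtx b, rtx c)
{
  /* Per the comment above: OP2 (here C) is used when the comparison is
     true, otherwise OP2+OP3 (here C+1) is used.  */
  return emit_conditional_add (x, LT, a, b, SImode,
			       c, const1_rtx, SImode, 0);
}
#endif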
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
	   (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
	(x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
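/* A minimal usage sketch (illustrative only, not part of GCC): emit
   "x += y" with the target's two-operand add pattern when it accepts
   these operands, otherwise fall back to the generic binop expander.  */
#if 0
static void
example_emit_inplace_add (rtx x, rtx y)
{
  if (have_add2_insn (x, y))
    emit_insn (gen_add2_insn (x, y));
  else
    /* expand_binop may emit a multi-insn sequence and may return a
       register different from X, so copy the result back.  */
    emit_move_insn (x, expand_binop (GET_MODE (x), add_optab, x, y,
				     NULL_RTX, 0, OPTAB_LIB_WIDEN));
}
#endif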
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
	   (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
	(x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
	   (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
	   (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
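/* A minimal usage sketch (illustrative only, not part of GCC): widen a
   SImode value into a DImode register with a single extend pattern when
   the target provides one; the last argument selects zero- vs.
   sign-extension.  */
#if 0
static bool
example_emit_zero_extend (rtx di_dest, rtx si_src)
{
  if (can_extend_p (DImode, SImode, 1) == CODE_FOR_nothing)
    return false;

  emit_insn (gen_extend_insn (di_dest, si_src, DImode, SImode, 1));
  return true;
}
#endif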
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC patterns
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  return CODE_FOR_nothing;
}
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */
4281 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4282 fmode
= GET_MODE_WIDER_MODE (fmode
))
4283 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4284 imode
= GET_MODE_WIDER_MODE (imode
))
4286 int doing_unsigned
= unsignedp
;
4288 if (fmode
!= GET_MODE (to
)
4289 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4292 icode
= can_float_p (fmode
, imode
, unsignedp
);
4293 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4294 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4296 if (icode
!= CODE_FOR_nothing
)
4298 if (imode
!= GET_MODE (from
))
4299 from
= convert_to_mode (imode
, from
, unsignedp
);
4301 if (fmode
!= GET_MODE (to
))
4302 target
= gen_reg_rtx (fmode
);
4304 emit_unop_insn (icode
, target
, from
,
4305 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4308 convert_move (to
, target
, 0);
4313 /* Unsigned integer, and no way to convert directly.
4314 Convert as signed, then conditionally adjust the result. */
4317 rtx label
= gen_label_rtx ();
4319 REAL_VALUE_TYPE offset
;
4321 /* Look for a usable floating mode FMODE wider than the source and at
4322 least as wide as the target. Using FMODE will avoid rounding woes
4323 with unsigned values greater than the signed maximum value. */
4325 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4326 fmode
= GET_MODE_WIDER_MODE (fmode
))
4327 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4328 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4331 if (fmode
== VOIDmode
)
4333 /* There is no such mode. Pretend the target is wide enough. */
4334 fmode
= GET_MODE (to
);
4336 /* Avoid double-rounding when TO is narrower than FROM. */
4337 if ((significand_size (fmode
) + 1)
4338 < GET_MODE_BITSIZE (GET_MODE (from
)))
4341 rtx neglabel
= gen_label_rtx ();
4343 /* Don't use TARGET if it isn't a register, is a hard register,
4344 or is the wrong mode. */
4346 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4347 || GET_MODE (target
) != fmode
)
4348 target
= gen_reg_rtx (fmode
);
4350 imode
= GET_MODE (from
);
4351 do_pending_stack_adjust ();
4353 /* Test whether the sign bit is set. */
4354 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4357 /* The sign bit is not set. Convert as signed. */
4358 expand_float (target
, from
, 0);
4359 emit_jump_insn (gen_jump (label
));
4362 /* The sign bit is set.
4363 Convert to a usable (positive signed) value by shifting right
4364 one bit, while remembering if a nonzero bit was shifted
4365 out; i.e., compute (from & 1) | (from >> 1). */
4367 emit_label (neglabel
);
4368 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4369 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4370 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4372 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4374 expand_float (target
, temp
, 0);
4376 /* Multiply by 2 to undo the shift above. */
4377 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4378 target
, 0, OPTAB_LIB_WIDEN
);
4380 emit_move_insn (target
, temp
);
4382 do_pending_stack_adjust ();
4388 /* If we are about to do some arithmetic to correct for an
4389 unsigned operand, do it in a pseudo-register. */
4391 if (GET_MODE (to
) != fmode
4392 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4393 target
= gen_reg_rtx (fmode
);
4395 /* Convert as signed integer to floating. */
4396 expand_float (target
, from
, 0);
4398 /* If FROM is negative (and therefore TO is negative),
4399 correct its value by 2**bitwidth. */
4401 do_pending_stack_adjust ();
4402 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4406 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4407 temp
= expand_binop (fmode
, add_optab
, target
,
4408 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4409 target
, 0, OPTAB_LIB_WIDEN
);
4411 emit_move_insn (target
, temp
);
4413 do_pending_stack_adjust ();
4418 /* No hardware instruction available; call a library routine. */
4423 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4425 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4426 from
= convert_to_mode (SImode
, from
, unsignedp
);
4428 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4429 gcc_assert (libfunc
);
4433 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4434 GET_MODE (to
), 1, from
,
4436 insns
= get_insns ();
4439 emit_libcall_block (insns
, target
, value
,
4440 gen_rtx_FLOAT (GET_MODE (to
), from
));
  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
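/* Host-side illustration (kept out of the build) of the unsigned
   correction performed above: convert as signed, then add 2**bitwidth
   back when the operand's sign bit was set.  Shown for a 64-bit value
   converted to double; 18446744073709551616.0 is 2**64.  */
#if 0
static double
example_uint64_to_double (unsigned long long u)
{
  double d = (double) (long long) u;	/* signed conversion */
  if ((long long) u < 0)		/* the sign bit of U was set */
    d += 18446744073709551616.0;	/* correct by 2**64 */
  return d;
}
#endif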
4457 /* Generate code to convert FROM to fixed point and store in TO. FROM
4458 must be floating point. */
4461 expand_fix (rtx to
, rtx from
, int unsignedp
)
4463 enum insn_code icode
;
4465 enum machine_mode fmode
, imode
;
4468 /* We first try to find a pair of modes, one real and one integer, at
4469 least as wide as FROM and TO, respectively, in which we can open-code
4470 this conversion. If the integer mode is wider than the mode of TO,
4471 we can do the conversion either signed or unsigned. */
4473 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4474 fmode
= GET_MODE_WIDER_MODE (fmode
))
4475 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4476 imode
= GET_MODE_WIDER_MODE (imode
))
4478 int doing_unsigned
= unsignedp
;
4480 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4481 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4482 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4484 if (icode
!= CODE_FOR_nothing
)
4486 if (fmode
!= GET_MODE (from
))
4487 from
= convert_to_mode (fmode
, from
, 0);
4491 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4492 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4496 if (imode
!= GET_MODE (to
))
4497 target
= gen_reg_rtx (imode
);
4499 emit_unop_insn (icode
, target
, from
,
4500 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4502 convert_move (to
, target
, unsignedp
);
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode to DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For other inputs overflow happens and the result is
     undefined.)  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
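  /* Host-side illustration of that strategy for a double D being converted
     to a 64-bit unsigned integer, where 9223372036854775808.0 is 2**63:

	 if (d < 9223372036854775808.0)
	   u = (unsigned long long) (long long) d;
	 else
	   u = (unsigned long long) (long long) (d - 9223372036854775808.0)
	       ^ ((unsigned long long) 1 << 63);

     The XOR with 1 << 63 adds 2**63 back into the fixed result, exactly as
     the expansion below does with xor_optab.  */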
4529 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4530 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4531 fmode
= GET_MODE_WIDER_MODE (fmode
))
4532 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4536 REAL_VALUE_TYPE offset
;
4537 rtx limit
, lab1
, lab2
, insn
;
4539 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4540 real_2expN (&offset
, bitsize
- 1);
4541 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4542 lab1
= gen_label_rtx ();
4543 lab2
= gen_label_rtx ();
4545 if (fmode
!= GET_MODE (from
))
4546 from
= convert_to_mode (fmode
, from
, 0);
4548 /* See if we need to do the subtraction. */
4549 do_pending_stack_adjust ();
4550 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4553 /* If not, do the signed "fix" and branch around fixup code. */
4554 expand_fix (to
, from
, 0);
4555 emit_jump_insn (gen_jump (lab2
));
4558 /* Otherwise, subtract 2**(N-1), convert to signed number,
4559 then add 2**(N-1). Do the addition using XOR since this
4560 will often generate better code. */
4562 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4563 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4564 expand_fix (to
, target
, 0);
4565 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4567 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4569 to
, 1, OPTAB_LIB_WIDEN
);
4572 emit_move_insn (to
, target
);
4576 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4577 != CODE_FOR_nothing
)
4579 /* Make a place for a REG_NOTE and add it. */
4580 insn
= emit_move_insn (to
, to
);
4581 set_unique_reg_note (insn
,
4583 gen_rtx_fmt_e (UNSIGNED_FIX
,
4591 /* We can't do it with an insn, so use a library call. But first ensure
4592 that the mode of TO is at least as wide as SImode, since those are the
4593 only library calls we know about. */
4595 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4597 target
= gen_reg_rtx (SImode
);
4599 expand_fix (target
, from
, unsignedp
);
4607 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4608 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4609 gcc_assert (libfunc
);
4613 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4614 GET_MODE (to
), 1, from
,
4616 insns
= get_insns ();
4619 emit_libcall_block (insns
, target
, value
,
4620 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4621 GET_MODE (to
), from
));
4626 if (GET_MODE (to
) == GET_MODE (target
))
4627 emit_move_insn (to
, target
);
4629 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
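/* A minimal usage sketch (illustrative only, not part of GCC): callers can
   check have_insn_for before committing to a strategy that depends on an
   operation being open-coded.  */
#if 0
static bool
example_prefer_rotate (enum machine_mode mode)
{
  /* If the target has a rotl<mode>3 pattern, a rotate is a single insn;
     otherwise it must be synthesized from shifts and IOR.  */
  return have_insn_for (ROTATE, mode) != 0;
}
#endif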
4643 /* Create a blank optab. */
4648 optab op
= ggc_alloc (sizeof (struct optab
));
4649 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4651 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4652 op
->handlers
[i
].libfunc
= 0;
4658 static convert_optab
4659 new_convert_optab (void)
4662 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4663 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4664 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4666 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4667 op
->handlers
[i
][j
].libfunc
= 0;
4672 /* Same, but fill in its code as CODE, and write it into the
4673 code_to_optab table. */
4675 init_optab (enum rtx_code code
)
4677 optab op
= new_optab ();
4679 code_to_optab
[(int) code
] = op
;
4683 /* Same, but fill in its code as CODE, and do _not_ write it into
4684 the code_to_optab table. */
4686 init_optabv (enum rtx_code code
)
4688 optab op
= new_optab ();
4693 /* Conversion optabs never go in the code_to_optab table. */
4694 static inline convert_optab
4695 init_convert_optab (enum rtx_code code
)
4697 convert_optab op
= new_convert_optab ();
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */
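/* For example (illustrating only the naming scheme; whether libgcc actually
   provides a given routine depends on the target), "add" with suffix '3'
   yields names such as "__adddi3" and "__addsf3", and "ftrunc" with suffix
   '2' yields "__ftruncdf2"; i.e. each generated name has the shape

       "__" <opname> <lowercased mode name> <suffix>.  */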
static void
init_libfuncs (optab optable, int first_mode, int last_mode,
	       const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
	*p++ = *q++;
      for (q = mname; *q; q++)
	*p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
	= init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
4750 /* Initialize the libfunc fields of an entire group of entries in some
4751 optab which correspond to all integer mode operations. The parameters
4752 have the same meaning as similarly named ones for the `init_libfuncs'
4753 routine. (See above). */
4756 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4758 int maxsize
= 2*BITS_PER_WORD
;
4759 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4760 maxsize
= LONG_LONG_TYPE_SIZE
;
4761 init_libfuncs (optable
, word_mode
,
4762 mode_for_size (maxsize
, MODE_INT
, 0),
4766 /* Initialize the libfunc fields of an entire group of entries in some
4767 optab which correspond to all real mode operations. The parameters
4768 have the same meaning as similarly named ones for the `init_libfuncs'
4769 routine. (See above). */
4772 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4774 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4777 /* Initialize the libfunc fields of an entire group of entries of an
4778 inter-mode-class conversion optab. The string formation rules are
4779 similar to the ones for init_libfuncs, above, but instead of having
4780 a mode name and an operand count these functions have two mode names
4781 and no operand count. */
4783 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4784 enum mode_class from_class
,
4785 enum mode_class to_class
)
4787 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4788 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4789 size_t opname_len
= strlen (opname
);
4790 size_t max_mname_len
= 0;
4792 enum machine_mode fmode
, tmode
;
4793 const char *fname
, *tname
;
4795 char *libfunc_name
, *suffix
;
4798 for (fmode
= first_from_mode
;
4800 fmode
= GET_MODE_WIDER_MODE (fmode
))
4801 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4803 for (tmode
= first_to_mode
;
4805 tmode
= GET_MODE_WIDER_MODE (tmode
))
4806 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4808 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4809 libfunc_name
[0] = '_';
4810 libfunc_name
[1] = '_';
4811 memcpy (&libfunc_name
[2], opname
, opname_len
);
4812 suffix
= libfunc_name
+ opname_len
+ 2;
4814 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4815 fmode
= GET_MODE_WIDER_MODE (fmode
))
4816 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4817 tmode
= GET_MODE_WIDER_MODE (tmode
))
4819 fname
= GET_MODE_NAME (fmode
);
4820 tname
= GET_MODE_NAME (tmode
);
4823 for (q
= fname
; *q
; p
++, q
++)
4825 for (q
= tname
; *q
; p
++, q
++)
4830 tab
->handlers
[tmode
][fmode
].libfunc
4831 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4836 /* Initialize the libfunc fields of an entire group of entries of an
4837 intra-mode-class conversion optab. The string formation rules are
4838 similar to the ones for init_libfunc, above. WIDENING says whether
4839 the optab goes from narrow to wide modes or vice versa. These functions
4840 have two mode names _and_ an operand count. */
4842 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4843 enum mode_class
class, bool widening
)
4845 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4846 size_t opname_len
= strlen (opname
);
4847 size_t max_mname_len
= 0;
4849 enum machine_mode nmode
, wmode
;
4850 const char *nname
, *wname
;
4852 char *libfunc_name
, *suffix
;
4855 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4856 nmode
= GET_MODE_WIDER_MODE (nmode
))
4857 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4859 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4860 libfunc_name
[0] = '_';
4861 libfunc_name
[1] = '_';
4862 memcpy (&libfunc_name
[2], opname
, opname_len
);
4863 suffix
= libfunc_name
+ opname_len
+ 2;
4865 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4866 nmode
= GET_MODE_WIDER_MODE (nmode
))
4867 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4868 wmode
= GET_MODE_WIDER_MODE (wmode
))
4870 nname
= GET_MODE_NAME (nmode
);
4871 wname
= GET_MODE_NAME (wmode
);
4874 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4876 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4882 tab
->handlers
[widening
? wmode
: nmode
]
4883 [widening
? nmode
: wmode
].libfunc
4884 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4891 init_one_libfunc (const char *name
)
4895 /* Create a FUNCTION_DECL that can be passed to
4896 targetm.encode_section_info. */
  /* ??? We don't have any type information except that this is
     a function.  Pretend this is "int foo()".  */
4899 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4900 build_function_type (integer_type_node
, NULL_TREE
));
4901 DECL_ARTIFICIAL (decl
) = 1;
4902 DECL_EXTERNAL (decl
) = 1;
4903 TREE_PUBLIC (decl
) = 1;
4905 symbol
= XEXP (DECL_RTL (decl
), 0);
4907 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4908 are the flags assigned by targetm.encode_section_info. */
4909 SYMBOL_REF_DECL (symbol
) = 0;
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
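/* A minimal usage sketch (illustrative only, not part of GCC): a backend's
   init_libfuncs hook (invoked through targetm.init_libfuncs below) can use
   these entry points to rename or remove the default routines.  The
   "__example_*" names are made up for illustration.  */
#if 0
static void
example_target_init_libfuncs (void)
{
  /* Use an OS-provided routine for 32-bit signed modulus.  */
  set_optab_libfunc (smod_optab, SImode, "__example_modsi3");

  /* Use a custom int-to-double conversion routine.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__example_floatsidf");

  /* Remove a default entry so no libcall is emitted for it.  */
  set_optab_libfunc (ffs_optab, DImode, NULL);
}
#endif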
4938 /* Call this once to initialize the contents of the optabs
4939 appropriately for the current target machine. */
4946 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4948 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4949 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4951 #ifdef HAVE_conditional_move
4952 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4953 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4956 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4958 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4959 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4962 add_optab
= init_optab (PLUS
);
4963 addv_optab
= init_optabv (PLUS
);
4964 sub_optab
= init_optab (MINUS
);
4965 subv_optab
= init_optabv (MINUS
);
4966 smul_optab
= init_optab (MULT
);
4967 smulv_optab
= init_optabv (MULT
);
4968 smul_highpart_optab
= init_optab (UNKNOWN
);
4969 umul_highpart_optab
= init_optab (UNKNOWN
);
4970 smul_widen_optab
= init_optab (UNKNOWN
);
4971 umul_widen_optab
= init_optab (UNKNOWN
);
4972 sdiv_optab
= init_optab (DIV
);
4973 sdivv_optab
= init_optabv (DIV
);
4974 sdivmod_optab
= init_optab (UNKNOWN
);
4975 udiv_optab
= init_optab (UDIV
);
4976 udivmod_optab
= init_optab (UNKNOWN
);
4977 smod_optab
= init_optab (MOD
);
4978 umod_optab
= init_optab (UMOD
);
4979 fmod_optab
= init_optab (UNKNOWN
);
4980 drem_optab
= init_optab (UNKNOWN
);
4981 ftrunc_optab
= init_optab (UNKNOWN
);
4982 and_optab
= init_optab (AND
);
4983 ior_optab
= init_optab (IOR
);
4984 xor_optab
= init_optab (XOR
);
4985 ashl_optab
= init_optab (ASHIFT
);
4986 ashr_optab
= init_optab (ASHIFTRT
);
4987 lshr_optab
= init_optab (LSHIFTRT
);
4988 rotl_optab
= init_optab (ROTATE
);
4989 rotr_optab
= init_optab (ROTATERT
);
4990 smin_optab
= init_optab (SMIN
);
4991 smax_optab
= init_optab (SMAX
);
4992 umin_optab
= init_optab (UMIN
);
4993 umax_optab
= init_optab (UMAX
);
4994 pow_optab
= init_optab (UNKNOWN
);
4995 atan2_optab
= init_optab (UNKNOWN
);
4997 /* These three have codes assigned exclusively for the sake of
4999 mov_optab
= init_optab (SET
);
5000 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5001 cmp_optab
= init_optab (COMPARE
);
5003 ucmp_optab
= init_optab (UNKNOWN
);
5004 tst_optab
= init_optab (UNKNOWN
);
5006 eq_optab
= init_optab (EQ
);
5007 ne_optab
= init_optab (NE
);
5008 gt_optab
= init_optab (GT
);
5009 ge_optab
= init_optab (GE
);
5010 lt_optab
= init_optab (LT
);
5011 le_optab
= init_optab (LE
);
5012 unord_optab
= init_optab (UNORDERED
);
5014 neg_optab
= init_optab (NEG
);
5015 negv_optab
= init_optabv (NEG
);
5016 abs_optab
= init_optab (ABS
);
5017 absv_optab
= init_optabv (ABS
);
5018 addcc_optab
= init_optab (UNKNOWN
);
5019 one_cmpl_optab
= init_optab (NOT
);
5020 ffs_optab
= init_optab (FFS
);
5021 clz_optab
= init_optab (CLZ
);
5022 ctz_optab
= init_optab (CTZ
);
5023 popcount_optab
= init_optab (POPCOUNT
);
5024 parity_optab
= init_optab (PARITY
);
5025 sqrt_optab
= init_optab (SQRT
);
5026 floor_optab
= init_optab (UNKNOWN
);
5027 lfloor_optab
= init_optab (UNKNOWN
);
5028 ceil_optab
= init_optab (UNKNOWN
);
5029 lceil_optab
= init_optab (UNKNOWN
);
5030 round_optab
= init_optab (UNKNOWN
);
5031 btrunc_optab
= init_optab (UNKNOWN
);
5032 nearbyint_optab
= init_optab (UNKNOWN
);
5033 rint_optab
= init_optab (UNKNOWN
);
5034 lrint_optab
= init_optab (UNKNOWN
);
5035 sincos_optab
= init_optab (UNKNOWN
);
5036 sin_optab
= init_optab (UNKNOWN
);
5037 asin_optab
= init_optab (UNKNOWN
);
5038 cos_optab
= init_optab (UNKNOWN
);
5039 acos_optab
= init_optab (UNKNOWN
);
5040 exp_optab
= init_optab (UNKNOWN
);
5041 exp10_optab
= init_optab (UNKNOWN
);
5042 exp2_optab
= init_optab (UNKNOWN
);
5043 expm1_optab
= init_optab (UNKNOWN
);
5044 ldexp_optab
= init_optab (UNKNOWN
);
5045 logb_optab
= init_optab (UNKNOWN
);
5046 ilogb_optab
= init_optab (UNKNOWN
);
5047 log_optab
= init_optab (UNKNOWN
);
5048 log10_optab
= init_optab (UNKNOWN
);
5049 log2_optab
= init_optab (UNKNOWN
);
5050 log1p_optab
= init_optab (UNKNOWN
);
5051 tan_optab
= init_optab (UNKNOWN
);
5052 atan_optab
= init_optab (UNKNOWN
);
5053 copysign_optab
= init_optab (UNKNOWN
);
5055 strlen_optab
= init_optab (UNKNOWN
);
5056 cbranch_optab
= init_optab (UNKNOWN
);
5057 cmov_optab
= init_optab (UNKNOWN
);
5058 cstore_optab
= init_optab (UNKNOWN
);
5059 push_optab
= init_optab (UNKNOWN
);
5061 reduc_smax_optab
= init_optab (UNKNOWN
);
5062 reduc_umax_optab
= init_optab (UNKNOWN
);
5063 reduc_smin_optab
= init_optab (UNKNOWN
);
5064 reduc_umin_optab
= init_optab (UNKNOWN
);
5065 reduc_splus_optab
= init_optab (UNKNOWN
);
5066 reduc_uplus_optab
= init_optab (UNKNOWN
);
5068 vec_extract_optab
= init_optab (UNKNOWN
);
5069 vec_set_optab
= init_optab (UNKNOWN
);
5070 vec_init_optab
= init_optab (UNKNOWN
);
5071 vec_shl_optab
= init_optab (UNKNOWN
);
5072 vec_shr_optab
= init_optab (UNKNOWN
);
5073 vec_realign_load_optab
= init_optab (UNKNOWN
);
5074 movmisalign_optab
= init_optab (UNKNOWN
);
5076 powi_optab
= init_optab (UNKNOWN
);
5079 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5080 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5081 trunc_optab
= init_convert_optab (TRUNCATE
);
5082 sfix_optab
= init_convert_optab (FIX
);
5083 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5084 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5085 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5086 sfloat_optab
= init_convert_optab (FLOAT
);
5087 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5089 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5091 movmem_optab
[i
] = CODE_FOR_nothing
;
5092 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5093 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5094 setmem_optab
[i
] = CODE_FOR_nothing
;
5096 sync_add_optab
[i
] = CODE_FOR_nothing
;
5097 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5098 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5099 sync_and_optab
[i
] = CODE_FOR_nothing
;
5100 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5101 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5102 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5103 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5104 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5105 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5106 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5107 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5108 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5109 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5110 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5111 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5112 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5113 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5114 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5115 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5116 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5117 sync_lock_release
[i
] = CODE_FOR_nothing
;
5119 #ifdef HAVE_SECONDARY_RELOADS
5120 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5124 /* Fill in the optabs with the insns we support. */
5127 /* Initialize the optabs with the names of the library functions. */
5128 init_integral_libfuncs (add_optab
, "add", '3');
5129 init_floating_libfuncs (add_optab
, "add", '3');
5130 init_integral_libfuncs (addv_optab
, "addv", '3');
5131 init_floating_libfuncs (addv_optab
, "add", '3');
5132 init_integral_libfuncs (sub_optab
, "sub", '3');
5133 init_floating_libfuncs (sub_optab
, "sub", '3');
5134 init_integral_libfuncs (subv_optab
, "subv", '3');
5135 init_floating_libfuncs (subv_optab
, "sub", '3');
5136 init_integral_libfuncs (smul_optab
, "mul", '3');
5137 init_floating_libfuncs (smul_optab
, "mul", '3');
5138 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5139 init_floating_libfuncs (smulv_optab
, "mul", '3');
5140 init_integral_libfuncs (sdiv_optab
, "div", '3');
5141 init_floating_libfuncs (sdiv_optab
, "div", '3');
5142 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5143 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5144 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5145 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5146 init_integral_libfuncs (smod_optab
, "mod", '3');
5147 init_integral_libfuncs (umod_optab
, "umod", '3');
5148 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5149 init_integral_libfuncs (and_optab
, "and", '3');
5150 init_integral_libfuncs (ior_optab
, "ior", '3');
5151 init_integral_libfuncs (xor_optab
, "xor", '3');
5152 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5153 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5154 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5155 init_integral_libfuncs (smin_optab
, "min", '3');
5156 init_floating_libfuncs (smin_optab
, "min", '3');
5157 init_integral_libfuncs (smax_optab
, "max", '3');
5158 init_floating_libfuncs (smax_optab
, "max", '3');
5159 init_integral_libfuncs (umin_optab
, "umin", '3');
5160 init_integral_libfuncs (umax_optab
, "umax", '3');
5161 init_integral_libfuncs (neg_optab
, "neg", '2');
5162 init_floating_libfuncs (neg_optab
, "neg", '2');
5163 init_integral_libfuncs (negv_optab
, "negv", '2');
5164 init_floating_libfuncs (negv_optab
, "neg", '2');
5165 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5166 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5167 init_integral_libfuncs (clz_optab
, "clz", '2');
5168 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5169 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5170 init_integral_libfuncs (parity_optab
, "parity", '2');
5172 /* Comparison libcalls for integers MUST come in pairs,
5174 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5175 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5176 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5178 /* EQ etc are floating point only. */
5179 init_floating_libfuncs (eq_optab
, "eq", '2');
5180 init_floating_libfuncs (ne_optab
, "ne", '2');
5181 init_floating_libfuncs (gt_optab
, "gt", '2');
5182 init_floating_libfuncs (ge_optab
, "ge", '2');
5183 init_floating_libfuncs (lt_optab
, "lt", '2');
5184 init_floating_libfuncs (le_optab
, "le", '2');
5185 init_floating_libfuncs (unord_optab
, "unord", '2');
5187 init_floating_libfuncs (powi_optab
, "powi", '2');
5190 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5191 MODE_INT
, MODE_FLOAT
);
5192 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5193 MODE_FLOAT
, MODE_INT
);
5194 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5195 MODE_FLOAT
, MODE_INT
);
5197 /* sext_optab is also used for FLOAT_EXTEND. */
5198 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5199 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5201 /* Use cabs for double complex abs, since systems generally have cabs.
5202 Don't define any libcall for float complex, so that cabs will be used. */
5203 if (complex_double_type_node
)
5204 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5205 = init_one_libfunc ("cabs");
5207 /* The ffs function operates on `int'. */
5208 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5209 = init_one_libfunc ("ffs");
5211 abort_libfunc
= init_one_libfunc ("abort");
5212 memcpy_libfunc
= init_one_libfunc ("memcpy");
5213 memmove_libfunc
= init_one_libfunc ("memmove");
5214 memcmp_libfunc
= init_one_libfunc ("memcmp");
5215 memset_libfunc
= init_one_libfunc ("memset");
5216 setbits_libfunc
= init_one_libfunc ("__setbits");
5218 #ifndef DONT_USE_BUILTIN_SETJMP
5219 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5220 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5222 setjmp_libfunc
= init_one_libfunc ("setjmp");
5223 longjmp_libfunc
= init_one_libfunc ("longjmp");
5225 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5226 unwind_sjlj_unregister_libfunc
5227 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5229 /* For function entry/exit instrumentation. */
5230 profile_function_entry_libfunc
5231 = init_one_libfunc ("__cyg_profile_func_enter");
5232 profile_function_exit_libfunc
5233 = init_one_libfunc ("__cyg_profile_func_exit");
5235 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5237 if (HAVE_conditional_trap
)
5238 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5240 /* Allow the target to add more libcalls or rename some, etc. */
5241 targetm
.init_libfuncs ();
5246 /* Print information about the current contents of the optabs on
5250 debug_optab_libfuncs (void)
5256 /* Dump the arithmetic optabs. */
5257 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5258 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5261 struct optab_handlers
*h
;
5264 h
= &o
->handlers
[j
];
	gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5268 fprintf (stderr
, "%s\t%s:\t%s\n",
5269 GET_RTX_NAME (o
->code
),
5271 XSTR (h
->libfunc
, 0));
5275 /* Dump the conversion optabs. */
5276 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
5277 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5278 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5281 struct optab_handlers
*h
;
5283 o
= &convert_optab_table
[i
];
5284 h
= &o
->handlers
[j
][k
];
	  gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5288 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5289 GET_RTX_NAME (o
->code
),
5292 XSTR (h
->libfunc
, 0));
5300 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5301 CODE. Return 0 on failure. */
5304 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5305 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5307 enum machine_mode mode
= GET_MODE (op1
);
5308 enum insn_code icode
;
5311 if (!HAVE_conditional_trap
)
5314 if (mode
== VOIDmode
)
5317 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5318 if (icode
== CODE_FOR_nothing
)
5322 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5323 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5329 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5331 PUT_CODE (trap_rtx
, code
);
5332 gcc_assert (HAVE_conditional_trap
);
5333 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5337 insn
= get_insns ();
5344 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5345 or unsigned operation code. */
5347 static enum rtx_code
5348 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5360 code
= unsignedp
? LTU
: LT
;
5363 code
= unsignedp
? LEU
: LE
;
5366 code
= unsignedp
? GTU
: GT
;
5369 code
= unsignedp
? GEU
: GE
;
5372 case UNORDERED_EXPR
:
5403 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5404 unsigned operators. Do not generate compare instruction. */
5407 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5409 enum rtx_code rcode
;
5411 rtx rtx_op0
, rtx_op1
;
5413 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5414 ensures that condition is a relational operation. */
5415 gcc_assert (COMPARISON_CLASS_P (cond
));
5417 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5418 t_op0
= TREE_OPERAND (cond
, 0);
5419 t_op1
= TREE_OPERAND (cond
, 1);
5421 /* Expand operands. */
5422 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5423 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5425 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5426 && GET_MODE (rtx_op0
) != VOIDmode
)
5427 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5429 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5430 && GET_MODE (rtx_op1
) != VOIDmode
)
5431 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5433 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5436 /* Return insn code for VEC_COND_EXPR EXPR. */
5438 static inline enum insn_code
5439 get_vcond_icode (tree expr
, enum machine_mode mode
)
5441 enum insn_code icode
= CODE_FOR_nothing
;
5443 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5444 icode
= vcondu_gen_code
[mode
];
5446 icode
= vcond_gen_code
[mode
];
/* Return TRUE iff appropriate vector insns are available
   for vector cond expr EXPR in VMODE mode.  */
5454 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5456 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5461 /* Generate insns for VEC_COND_EXPR. */
5464 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5466 enum insn_code icode
;
5467 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5468 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5469 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5471 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5472 if (icode
== CODE_FOR_nothing
)
5476 target
= gen_reg_rtx (mode
);
5478 /* Get comparison rtx. First expand both cond expr operands. */
5479 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5481 cc_op0
= XEXP (comparison
, 0);
5482 cc_op1
= XEXP (comparison
, 1);
5483 /* Expand both operands and force them in reg, if required. */
5484 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5485 NULL_RTX
, VOIDmode
, 1);
5486 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
5487 && mode
!= VOIDmode
)
5488 rtx_op1
= force_reg (mode
, rtx_op1
);
5490 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5491 NULL_RTX
, VOIDmode
, 1);
5492 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
5493 && mode
!= VOIDmode
)
5494 rtx_op2
= force_reg (mode
, rtx_op2
);
5496 /* Emit instruction! */
5497 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5498 comparison
, cc_op0
, cc_op1
));
5504 /* This is an internal subroutine of the other compare_and_swap expanders.
5505 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5506 operation. TARGET is an optional place to store the value result of
5507 the operation. ICODE is the particular instruction to expand. Return
5508 the result of the operation. */
5511 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5512 rtx target
, enum insn_code icode
)
5514 enum machine_mode mode
= GET_MODE (mem
);
5517 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5518 target
= gen_reg_rtx (mode
);
5520 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5521 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5522 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5523 old_val
= force_reg (mode
, old_val
);
5525 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5526 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5527 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5528 new_val
= force_reg (mode
, new_val
);
5530 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5531 if (insn
== NULL_RTX
)
5538 /* Expand a compare-and-swap operation and return its value. */
5541 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5543 enum machine_mode mode
= GET_MODE (mem
);
5544 enum insn_code icode
= sync_compare_and_swap
[mode
];
5546 if (icode
== CODE_FOR_nothing
)
5549 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
5552 /* Expand a compare-and-swap operation and store true into the result if
5553 the operation was successful and false otherwise. Return the result.
5554 Unlike other routines, TARGET is not optional. */
5557 expand_bool_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5559 enum machine_mode mode
= GET_MODE (mem
);
5560 enum insn_code icode
;
5561 rtx subtarget
, label0
, label1
;
5563 /* If the target supports a compare-and-swap pattern that simultaneously
5564 sets some flag for success, then use it. Otherwise use the regular
5565 compare-and-swap and follow that immediately with a compare insn. */
5566 icode
= sync_compare_and_swap_cc
[mode
];
5570 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5572 if (subtarget
!= NULL_RTX
)
5576 case CODE_FOR_nothing
:
5577 icode
= sync_compare_and_swap
[mode
];
5578 if (icode
== CODE_FOR_nothing
)
	/* Ensure that if old_val == mem, we're not comparing
	   against an old value.  */
5583 if (MEM_P (old_val
))
5584 old_val
= force_reg (mode
, old_val
);
5586 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5588 if (subtarget
== NULL_RTX
)
5591 emit_cmp_insn (subtarget
, old_val
, EQ
, const0_rtx
, mode
, true);
5594 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5595 setcc instruction from the beginning. We don't work too hard here,
5596 but it's nice to not be stupid about initial code gen either. */
5597 if (STORE_FLAG_VALUE
== 1)
5599 icode
= setcc_gen_code
[EQ
];
5600 if (icode
!= CODE_FOR_nothing
)
5602 enum machine_mode cmode
= insn_data
[icode
].operand
[0].mode
;
5606 if (!insn_data
[icode
].operand
[0].predicate (target
, cmode
))
5607 subtarget
= gen_reg_rtx (cmode
);
5609 insn
= GEN_FCN (icode
) (subtarget
);
5613 if (GET_MODE (target
) != GET_MODE (subtarget
))
5615 convert_move (target
, subtarget
, 1);
5623 /* Without an appropriate setcc instruction, use a set of branches to
5624 get 1 and 0 stored into target. Presumably if the target has a
5625 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5627 label0
= gen_label_rtx ();
5628 label1
= gen_label_rtx ();
5630 emit_jump_insn (bcc_gen_fctn
[EQ
] (label0
));
5631 emit_move_insn (target
, const0_rtx
);
5632 emit_jump_insn (gen_jump (label1
));
5634 emit_label (label0
);
5635 emit_move_insn (target
, const1_rtx
);
5636 emit_label (label1
);
5641 /* This is a helper function for the other atomic operations. This function
5642 emits a loop that contains SEQ that iterates until a compare-and-swap
5643 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5644 a set of instructions that takes a value from OLD_REG as an input and
5645 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5646 set to the current contents of MEM. After SEQ, a compare-and-swap will
5647 attempt to update MEM with NEW_REG. The function returns true when the
5648 loop was generated successfully. */
5651 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5653 enum machine_mode mode
= GET_MODE (mem
);
5654 enum insn_code icode
;
5655 rtx label
, cmp_reg
, subtarget
;
  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */
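  /* At the C level the same loop, using GCC's __sync builtin as a stand-in
     for the target's compare-and-swap pattern, would read (illustrative
     only):

	 cmp_reg = *mem;
	 do
	   {
	     old_reg = cmp_reg;
	     new_reg = OP (old_reg, val);	(the SEQ body)
	     cmp_reg = __sync_val_compare_and_swap (mem, old_reg, new_reg);
	   }
	 while (cmp_reg != old_reg);  */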
5670 label
= gen_label_rtx ();
5671 cmp_reg
= gen_reg_rtx (mode
);
5673 emit_move_insn (cmp_reg
, mem
);
5675 emit_move_insn (old_reg
, cmp_reg
);
5679 /* If the target supports a compare-and-swap pattern that simultaneously
5680 sets some flag for success, then use it. Otherwise use the regular
5681 compare-and-swap and follow that immediately with a compare insn. */
5682 icode
= sync_compare_and_swap_cc
[mode
];
5686 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5688 if (subtarget
!= NULL_RTX
)
5690 gcc_assert (subtarget
== cmp_reg
);
5695 case CODE_FOR_nothing
:
5696 icode
= sync_compare_and_swap
[mode
];
5697 if (icode
== CODE_FOR_nothing
)
5700 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5702 if (subtarget
== NULL_RTX
)
5704 if (subtarget
!= cmp_reg
)
5705 emit_move_insn (cmp_reg
, subtarget
);
5707 emit_cmp_insn (cmp_reg
, old_reg
, EQ
, const0_rtx
, mode
, true);
5710 /* ??? Mark this jump predicted not taken? */
5711 emit_jump_insn (bcc_gen_fctn
[NE
] (label
));
5716 /* This function generates the atomic operation MEM CODE= VAL. In this
5717 case, we do not care about any resulting value. Returns NULL if we
5718 cannot generate the operation. */
5721 expand_sync_operation (rtx mem
, rtx val
, enum rtx_code code
)
5723 enum machine_mode mode
= GET_MODE (mem
);
5724 enum insn_code icode
;
5727 /* Look to see if the target supports the operation directly. */
5731 icode
= sync_add_optab
[mode
];
5734 icode
= sync_ior_optab
[mode
];
5737 icode
= sync_xor_optab
[mode
];
5740 icode
= sync_and_optab
[mode
];
5743 icode
= sync_nand_optab
[mode
];
5747 icode
= sync_sub_optab
[mode
];
5748 if (icode
== CODE_FOR_nothing
)
5750 icode
= sync_add_optab
[mode
];
5751 if (icode
!= CODE_FOR_nothing
)
5753 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
5763 /* Generate the direct operation, if present. */
5764 if (icode
!= CODE_FOR_nothing
)
5766 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5767 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5768 if (!insn_data
[icode
].operand
[1].predicate (val
, mode
))
5769 val
= force_reg (mode
, val
);
5771 insn
= GEN_FCN (icode
) (mem
, val
);
5779 /* Failing that, generate a compare-and-swap loop in which we perform the
5780 operation with normal arithmetic instructions. */
5781 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5783 rtx t0
= gen_reg_rtx (mode
), t1
;
5790 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5793 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5794 true, OPTAB_LIB_WIDEN
);
5796 insn
= get_insns ();
5799 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
5806 /* This function generates the atomic operation MEM CODE= VAL. In this
5807 case, we do care about the resulting value: if AFTER is true then
5808 return the value MEM holds after the operation, if AFTER is false
5809 then return the value MEM holds before the operation. TARGET is an
5810 optional place for the result value to be stored. */
5813 expand_sync_fetch_operation (rtx mem
, rtx val
, enum rtx_code code
,
5814 bool after
, rtx target
)
5816 enum machine_mode mode
= GET_MODE (mem
);
5817 enum insn_code old_code
, new_code
, icode
;
5821 /* Look to see if the target supports the operation directly. */
5825 old_code
= sync_old_add_optab
[mode
];
5826 new_code
= sync_new_add_optab
[mode
];
5829 old_code
= sync_old_ior_optab
[mode
];
5830 new_code
= sync_new_ior_optab
[mode
];
5833 old_code
= sync_old_xor_optab
[mode
];
5834 new_code
= sync_new_xor_optab
[mode
];
5837 old_code
= sync_old_and_optab
[mode
];
5838 new_code
= sync_new_and_optab
[mode
];
5841 old_code
= sync_old_nand_optab
[mode
];
5842 new_code
= sync_new_nand_optab
[mode
];
5846 old_code
= sync_old_sub_optab
[mode
];
5847 new_code
= sync_new_sub_optab
[mode
];
5848 if (old_code
== CODE_FOR_nothing
&& new_code
== CODE_FOR_nothing
)
5850 old_code
= sync_old_add_optab
[mode
];
5851 new_code
= sync_new_add_optab
[mode
];
5852 if (old_code
!= CODE_FOR_nothing
|| new_code
!= CODE_FOR_nothing
)
5854 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
  /* If the target supports the proper new/old operation, great.  But if
     we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
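  /* Concretely (illustrative only): for CODE == PLUS with VAL, if only the
     "old value" pattern exists, the new value can be recomputed afterwards
     as old + val; if only the "new value" pattern exists, the old value can
     be recovered as new - val (or new ^ val for XOR), which is why only
     reversible operations can take that path.  */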
5874 if (icode
== CODE_FOR_nothing
)
5877 if (icode
!= CODE_FOR_nothing
)
5884 if (icode
== CODE_FOR_nothing
5885 && (code
== PLUS
|| code
== MINUS
|| code
== XOR
))
5888 if (icode
!= CODE_FOR_nothing
)
5893 /* If we found something supported, great. */
5894 if (icode
!= CODE_FOR_nothing
)
5896 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5897 target
= gen_reg_rtx (mode
);
5899 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5900 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5901 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5902 val
= force_reg (mode
, val
);
5904 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5909 /* If we need to compensate for using an operation with the
5910 wrong return value, do so now. */
5917 else if (code
== MINUS
)
5922 target
= expand_simple_unop (mode
, NOT
, target
, NULL_RTX
, true);
5923 target
= expand_simple_binop (mode
, code
, target
, val
, NULL_RTX
,
5924 true, OPTAB_LIB_WIDEN
);
5931 /* Failing that, generate a compare-and-swap loop in which we perform the
5932 operation with normal arithmetic instructions. */
5933 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5935 rtx t0
= gen_reg_rtx (mode
), t1
;
5937 if (!target
|| !register_operand (target
, mode
))
5938 target
= gen_reg_rtx (mode
);
5943 emit_move_insn (target
, t0
);
5947 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5950 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5951 true, OPTAB_LIB_WIDEN
);
5953 emit_move_insn (target
, t1
);
5955 insn
= get_insns ();
5958 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may only support VAL with the constant 1; in this case the return
   value will still be 0/1, but the exact value stored in MEM is
   target-defined.  TARGET is an optional place to stick the return
   value.  */
5973 expand_sync_lock_test_and_set (rtx mem
, rtx val
, rtx target
)
5975 enum machine_mode mode
= GET_MODE (mem
);
5976 enum insn_code icode
;
5979 /* If the target supports the test-and-set directly, great. */
5980 icode
= sync_lock_test_and_set
[mode
];
5981 if (icode
!= CODE_FOR_nothing
)
5983 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5984 target
= gen_reg_rtx (mode
);
5986 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5987 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5988 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5989 val
= force_reg (mode
, val
);
5991 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5999 /* Otherwise, use a compare-and-swap loop for the exchange. */
6000 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6002 if (!target
|| !register_operand (target
, mode
))
6003 target
= gen_reg_rtx (mode
);
6004 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6005 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6006 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6013 #include "gt-optabs.h"