/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
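
/* Illustrative sketch only (not a call made in this file; A and B are
   hypothetical QImode values): a bitwise AND widened to SImode can pass
   NO_EXTEND = 1 because only the low byte of the result is read back,
   whereas a right shift must really extend its first operand.  */
#if 0
rtx wa = widen_operand (a, SImode, QImode, /*unsignedp=*/1, /*no_extend=*/1);
rtx wb = widen_operand (b, SImode, QImode, /*unsignedp=*/1, /*no_extend=*/1);
rtx wide_and = expand_binop (SImode, and_optab, wa, wb,
                             NULL_RTX, 1, OPTAB_DIRECT);
#endif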
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;

  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    default:
      break;
    }

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return NULL;
    }
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp, pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0) : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1) : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2) : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);

  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
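
/* Minimal usage sketch (illustrative only; A and B are hypothetical SImode
   operands): constant inputs fold to a constant rtx at compile time, while
   anything else expands to real insns.  */
#if 0
rtx sum = simplify_expand_binop (SImode, add_optab, a, b,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
#endif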
static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);
  else
    return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  gcc_assert (pat);
  emit_insn (pat);

  return target;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
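
/* Illustrative sketch only (plain 32-bit words instead of rtl): a logical
   right shift of the pair {HI,LO} by a count N >= 32 reduces to a single
   word shift of the "outof" (high) half.  */
#if 0
static void
superword_lshiftrt_example (unsigned int hi, int n,
                            unsigned int *out_hi, unsigned int *out_lo)
{
  *out_lo = hi >> (n - 32);  /* INTO_TARGET <- OUTOF_INPUT >> SUPERWORD_OP1.  */
  *out_hi = 0;               /* ashr would use copies of the sign bit instead.  */
}
#endif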
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
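
/* Illustrative sketch only (plain 32-bit words instead of rtl, with a
   shift count 0 < N < 32): the subword case combines the shift of each
   half with the bits carried across the word boundary, which is what
   expand_subword_shift builds from word_mode shifts and an IOR.  */
#if 0
static void
doubleword_ashl_example (unsigned int hi, unsigned int lo, int n,
                         unsigned int *out_hi, unsigned int *out_lo)
{
  *out_hi = (hi << n) | (lo >> (32 - n));  /* INTO half: own shift plus carries.  */
  *out_lo = lo << n;                       /* OUTOF half: plain word shift.  */
}
#endif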
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
			    _______________________
			   [__op0_high_|__op0_low__]
			    _______________________
        *		   [__op1_high_|__op1_low__]
       _______________________________________________
			    _______________________
    (1)			   [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	      _______________________
    (3)	     [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
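
/* A worked instance of the adjustment above (illustrative only, assuming
   BITS_PER_WORD = 8): take op0_low = 0x90 and op1_low = 0x03.  The
   unsigned product is 0x90 * 0x03 = 0x01B0, but a signed widening
   multiply sees op0_low as -0x70 and yields -0x150 = 0xFEB0.  The two
   differ by exactly op1_low * 2**BITS_PER_WORD = 0x0300, which is why
   adding op1_low (here via the shifted-in 0-or-1) into the high part of
   the product recovers the unsigned result.  */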
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
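
/* Minimal usage sketch (illustrative only; X and Y are hypothetical SImode
   pseudos): the rtx code is mapped to its optab via code_to_optab and the
   call defers to expand_binop.  */
#if 0
rtx sum = expand_simple_binop (SImode, PLUS, x, y,
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);
#endif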
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (((target == 0 || REG_P (target))
           ? ((REG_P (op1)
               && !REG_P (op0))
              || target == op1)
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
        {
          rtx tmp = op0;
          op0 = op1;
          op1 = tmp;
        }
    }
1090 if (methods
!= OPTAB_MUST_WIDEN
1091 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1093 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1094 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1095 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1097 rtx xop0
= op0
, xop1
= op1
;
1102 temp
= gen_reg_rtx (mode
);
1104 /* If it is a commutative operator and the modes would match
1105 if we would swap the operands, we can save the conversions. */
1108 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1109 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1113 tmp
= op0
; op0
= op1
; op1
= tmp
;
1114 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1118 /* In case the insn wants input operands in modes different from
1119 those of the actual operands, convert the operands. It would
1120 seem that we don't need to convert CONST_INTs, but we do, so
1121 that they're properly zero-extended, sign-extended or truncated
1124 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1125 xop0
= convert_modes (mode0
,
1126 GET_MODE (op0
) != VOIDmode
1131 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1132 xop1
= convert_modes (mode1
,
1133 GET_MODE (op1
) != VOIDmode
1138 /* Now, if insn's predicates don't allow our operands, put them into
1141 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
)
1142 && mode0
!= VOIDmode
)
1143 xop0
= copy_to_mode_reg (mode0
, xop0
);
1145 if (!insn_data
[icode
].operand
[2].predicate (xop1
, mode1
)
1146 && mode1
!= VOIDmode
)
1147 xop1
= copy_to_mode_reg (mode1
, xop1
);
1149 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
1150 temp
= gen_reg_rtx (mode
);
1152 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1155 /* If PAT is composed of more than one insn, try to add an appropriate
1156 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1157 operand, call ourselves again, this time without a target. */
1158 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1159 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1161 delete_insns_since (last
);
1162 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1163 unsignedp
, methods
);
1170 delete_insns_since (last
);
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
          else
            equiv_value = 0;

          /* We can't make this a no conflict block if this is a word swap,
             because the word swap case fails if the input and output values
             are in the same register.  */
          if (shift_count != BITS_PER_WORD)
            emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          else
            emit_insn (insns);

          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }
      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
        {
          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT
                      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                                 GET_MODE_BITSIZE (wider_mode)))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
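
/* Illustrative sketch only (assuming the target provides a two-output
   pattern such as sincos, with a hypothetical DFmode input X); which
   output is which follows the operand order documented above.  */
#if 0
rtx r0 = gen_reg_rtx (DFmode);
rtx r1 = gen_reg_rtx (DFmode);
int ok = expand_twoval_unop (sincos_optab, x, r0, r1, 0);
#endif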
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0) : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1) : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                    NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
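
/* The libcall above is assumed to return both values packed into one
   integer twice as wide as MODE; simplify_gen_subreg then picks either the
   half at byte offset 0 or the half at offset GET_MODE_SIZE (mode),
   depending on whether TARG0 or TARG1 was requested.  A rough C analogy,
   with a hypothetical 32-bit MODE and little-endian packing:
     unsigned long long packed = divmod_like_libcall (a, b);
     unsigned int lo = (unsigned int) packed;
     unsigned int hi = (unsigned int) (packed >> 32);  */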
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */

static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
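
/* The subtraction above compensates for the extra leading zeros introduced
   by zero-extension; e.g. for a hypothetical 8-bit value widened to 32 bits,
   clz8 (x) == clz32 ((uint32_t) x) - (32 - 8) whenever x is nonzero.  */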
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
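
/* Parity is simply the low bit of the population count, which is what the
   (and (popcount x) 1) form above computes.  In C terms:
     int parity = __builtin_popcount (x) & 1;   equivalent to __builtin_parity (x)  */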
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_double_const (lo, hi, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_double_const (lo, hi, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
                           gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
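
/* For a concrete picture of the masking above, take IEEE single precision,
   whose sign bit is bit 31 of the 32-bit image:
     abs:  bits & 0x7fffffff   (AND with the complemented sign mask)
     neg:  bits ^ 0x80000000   (XOR with the sign mask)
   The multi-word path applies the mask only to the word holding the sign
   bit and copies the remaining words unchanged.  */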
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx last = get_last_insn ();
  rtx pat;

  class = GET_MODE_CLASS (mode);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
        return temp;
      else
        goto try_libcall;
    }

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
          {
            rtx xop0 = op0;

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (class != MODE_INT)
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,
                                             copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (class == MODE_FLOAT)
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == popcount_optab || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, mode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)
            {
              rtx xop0 = op0;

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  */
              if (unoptab == clz_optab && temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
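
/* The branch-free integer form above relies on the arithmetic-shift mask;
   for a 32-bit operand:
     m = x >> 31;        all zeros if x >= 0, all ones if x < 0
     abs = (x ^ m) - m;  (overflows for INT_MIN, as ordinary abs does)  */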
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
                                  NULL_RTX, op1);
  else
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                             NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  return target;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word;
  rtx label;

  if (!op0_is_abs)
    op0 = expand_unop (mode, abs_optab, op0, target, 0);

  if (target == NULL_RTX)
    target = copy_to_reg (op0);
  else
    emit_move_insn (target, op0);

  /* Check the sign bit of op1 and negate the result if it is set.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      op1 = gen_lowpart (imode, op1);
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      op1 = operand_subword_force (op1, word, mode);
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  op1 = expand_binop (imode, and_optab, op1,
                      immed_double_const (lo, hi, imode),
                      NULL_RTX, 1, OPTAB_LIB_WIDEN);

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece = expand_binop (imode, and_optab, op0_piece,
                                          immed_double_const (~lo, ~hi, imode),
                                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_double_const (lo, hi, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_double_const (lo, hi, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_double_const (~lo, ~hi, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
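
/* The masking above is the usual bit-level definition of copysign; for IEEE
   single precision, with X supplying the magnitude and Y the sign:
     result_bits = (xbits & 0x7fffffff) | (ybits & 0x80000000);  */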
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
          || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
              && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block.  Set P->must_stay
   if the currently examined clobber / store has to stay in the list of
   insns that constitute the actual no_conflict block.  */

static void
no_conflict_move_test (rtx dest, rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_mentioned_p (dest, PATTERN (p->first))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
          || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
        return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create a libcall regions at their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
        {
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;
          else
            insns = next;

          if (next)
            PREV_INSN (next) = PREV_INSN (insn);

          add_insn (insn);
        }
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && REG_P (op1))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
                                              REG_NOTES (insn));

      if (op0 && REG_P (op0))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
                                              REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
        set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         alleged libcall value when found together with the REG_RETVAL
         note added below.  An existing note can come from an insn
         expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
                                         REG_NOTES (first));
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
              remove_note (insn, note);
          }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
        {
          rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

          if (note != 0)
            XEXP (note, 0) = constm1_rtx;
          else
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
                                                  REG_NOTES (insn));
        }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create a libcall regions at their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
          && (insn == insns
              || ((! INSN_P (insns)
                   || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
                  && ! reg_used_between_p (SET_DEST (set), insns, insn)
                  && ! modified_in_p (SET_SRC (set), insns)
                  && ! modified_between_p (SET_SRC (set), insns, insn))))
        {
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;
          else
            insns = next;

          if (next)
            PREV_INSN (next) = PREV_INSN (insn);

          add_insn (insn);
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         libcall value when found together with the REG_RETVAL note added
         below.  An existing note can come from an insn expansion at
         "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes
         when the encapsulated region would not be in one basic block,
         i.e. when there is a control_flow_insn_p insn between FIRST
         and LAST.  */
      bool attach_libcall_retval_notes = true;
      next = NEXT_INSN (last);
      for (insn = first; insn != next; insn = NEXT_INSN (insn))
        if (control_flow_insn_p (insn))
          {
            attach_libcall_retval_notes = false;
            break;
          }

      if (attach_libcall_retval_notes)
        {
          REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
                                                 REG_NOTES (first));
          REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
                                                REG_NOTES (last));
        }
    }
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }
      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = cmpmem_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstr_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstrn_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((GET_CODE (size) == CONST_INT
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *px = result;
          *py = const0_rtx;
          *pmode = result_mode;
          return;
        }

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
                                        result_mode, 3,
                                        XEXP (x, 0), Pmode,
                                        XEXP (y, 0), Pmode,
                                        size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
        libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
                                        word_mode, 2, x, mode, y, mode);

      *px = result;
      *pmode = word_mode;
      if (TARGET_LIB_INT_CMP_BIASED)
        /* Integer comparison returns a result that must be compared
           against 1, so that even if we do an unsigned compare
           afterward, there is still a value that can represent the
           result "less than".  */
        *py = const1_rtx;
      else
        {
          *py = const0_rtx;
          *punsignedp = 1;
        }
      return;
    }

  gcc_assert (class == MODE_FLOAT);
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
        {
          icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

          if (icode != CODE_FOR_nothing
              && insn_data[icode].operand[0].predicate (test, wider_mode))
            {
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
              return;
            }
        }

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      if (class != MODE_INT && class != MODE_FLOAT
          && class != MODE_COMPLEX_FLOAT)
        break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
         is out of sync.  */
      gcc_assert (label);

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
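
/* A typical call of the routine above is a compare-and-branch against zero,
   e.g. (illustrative values only):
     emit_cmp_and_jump_insns (op, const0_rtx, EQ, NULL_RTX,
                              GET_MODE (op), 0, label);
   which prepares the comparison and emits the conditional jump to LABEL.  */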
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
        break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
        {
          rtx tmp;
          tmp = x; x = y; y = tmp;
          comparison = swapped;
          break;
        }

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
          && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        {
          rtx true_rtx, false_rtx;

          switch (comparison)
            {
            case EQ:
              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;
              break;

            case NE:
              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;
              break;

            case GT:
              true_rtx = const1_rtx;
              false_rtx = const0_rtx;
              break;

            case GE:
              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;
              break;

            case LT:
              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;
              break;

            case LE:
              true_rtx = const0_rtx;
              false_rtx = const1_rtx;
              break;

            default:
              gcc_unreachable ();
            }
          equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                        equiv, true_rtx, false_rtx);
        }
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}
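
/* The UNORDERED equivalence recorded above is the usual NaN test: a value
   compares unordered with anything exactly when it is not equal to itself,
   i.e. unordered (x, y) == (x != x) || (y != y).  */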
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  subtarget = target;

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
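
/* Illustrative use of emit_conditional_move above: a caller wanting
   MAX (a, b) without a branch might try (values are illustrative only)
     emit_conditional_move (target, GT, a, b, mode, a, b, mode, 0)
   and fall back to a compare-and-jump sequence if it returns NULL_RTX.  */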
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
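
/* An illustrative sketch, not part of GCC itself: the value an "addcc"
   pattern is asked to compute by the expander above, written as plain C.
   The function name and the use of LT are purely illustrative; it is kept
   under #if 0 so it does not affect the build.  */
#if 0
/* TARGET gets OP2 when the comparison holds, otherwise OP2 + OP3.  */
static int
cond_add_semantics (int op0, int op1, int op2, int op3)
{
  return (op0 < op1) ? op2 : op2 + op3;
}
#endif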
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;

#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
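
/* An illustrative sketch, not part of GCC itself: a "fix" conversion
   truncates toward zero, which is the behavior an explicit FTRUNC insn
   guarantees before a bare fix pattern whose rounding is unspecified.
   Standalone C, kept under #if 0 so it does not affect the build.  */
#if 0
#include <assert.h>
static void
fix_trunc_example (void)
{
  assert ((int)  3.7 ==  3);   /* truncation toward zero */
  assert ((int) -3.7 == -3);   /* not -4: no rounding toward -infinity */
}
#endif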
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
          icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
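
/* An illustrative sketch, not part of GCC itself: the
   (from & 1) | (from >> 1) trick used above, written as standalone C for a
   64-bit unsigned source whose sign bit is set.  Halving keeps the sticky
   low bit so the final doubling rounds the same way a direct conversion
   would.  Kept under #if 0 so it does not affect the build.  */
#if 0
#include <stdint.h>
static double
unsigned_to_double (uint64_t u)
{
  if ((int64_t) u >= 0)
    return (double) (int64_t) u;          /* sign bit clear: convert as signed */
  uint64_t half = (u >> 1) | (u & 1);     /* positive, remembers the shifted-out bit */
  double d = (double) (int64_t) half;
  return d + d;                           /* multiply by 2 to undo the shift */
}
#endif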
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (As for other input, overflow happens and the result is
     undefined.)  So we know that the most important bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
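
/* An illustrative sketch, not part of GCC itself: the unsigned fix strategy
   described above, written as standalone C for a DFmode source and a 64-bit
   destination.  Values below 2^63 take the plain signed conversion; larger
   ones have 2^63 subtracted first and the bit added back with XOR.  Kept
   under #if 0 so it does not affect the build.  */
#if 0
#include <stdint.h>
static uint64_t
double_to_unsigned (double x)
{
  const double limit = 9223372036854775808.0;   /* 2^63 */
  if (x < limit)
    return (uint64_t) (int64_t) x;               /* the hot path: signed fix */
  int64_t t = (int64_t) (x - limit);             /* clears the 2^63 bit, no rounding */
  return (uint64_t) t ^ (UINT64_C (1) << 63);    /* add 2^63 back via XOR */
}
#endif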
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}
static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */

static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
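
/* An illustrative sketch, not part of GCC itself: for add_optab in DFmode
   with suffix '3', the rule above builds the name "__adddf3", the libgcc
   routine for DFmode addition.  The same string construction in standalone
   C, kept under #if 0 so it does not affect the build.  */
#if 0
#include <stdio.h>
static void
libfunc_name_example (void)
{
  char buf[32];
  snprintf (buf, sizeof buf, "__%s%s%c", "add", "df", '3');
  puts (buf);   /* prints "__adddf3" */
}
#endif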
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2 * BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class from_class,
                               enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
         tmode = GET_MODE_WIDER_MODE (tmode))
      {
        fname = GET_MODE_NAME (fmode);
        tname = GET_MODE_NAME (tmode);

        p = suffix;
        for (q = fname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = tname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p = '\0';

        tab->handlers[tmode][fmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
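
/* An illustrative sketch, not part of GCC itself: with sfloat_optab,
   opname "float", MODE_INT -> MODE_FLOAT, the SImode-to-DFmode entry is
   named "__floatsidf"; the reverse fix direction produces names such as
   "__fixdfsi".  Two mode names, no operand count.  Kept under #if 0 so it
   does not affect the build.  */
#if 0
#include <stdio.h>
static void
interclass_name_example (void)
{
  char buf[32];
  snprintf (buf, sizeof buf, "__%s%s%s", "float", "si", "df");
  puts (buf);   /* prints "__floatsidf" */
}
#endif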
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */

static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
         wmode = GET_MODE_WIDER_MODE (wmode))
      {
        nname = GET_MODE_NAME (nmode);
        wname = GET_MODE_NAME (wmode);

        p = suffix;
        for (q = widening ? nname : wname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = widening ? wname : nname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p++ = '2';
        *p = '\0';

        tab->handlers[widening ? wmode : nmode]
                     [widening ? nmode : wmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
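
/* An illustrative sketch, not part of GCC itself: for sext_optab
   ("extend", MODE_FLOAT, widening) the SFmode-to-DFmode entry becomes
   "__extendsfdf2"; for trunc_optab ("trunc", non-widening) the
   DFmode-to-SFmode entry becomes "__truncdfsf2".  Two mode names and an
   operand count.  Kept under #if 0 so it does not affect the build.  */
#if 0
#include <stdio.h>
static void
intraclass_name_example (void)
{
  char widen[32], narrow[32];
  snprintf (widen, sizeof widen, "__%s%s%s%c", "extend", "sf", "df", '2');
  snprintf (narrow, sizeof narrow, "__%s%s%s%c", "trunc", "df", "sf", '2');
  printf ("%s %s\n", widen, narrow);   /* "__extendsfdf2 __truncdfsf2" */
}
#endif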
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  drem_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  lfloor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  lceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  lrint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  ldexp_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  copysign_optab = init_optab (UNKNOWN);

  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  reduc_smax_optab = init_optab (UNKNOWN);
  reduc_umax_optab = init_optab (UNKNOWN);
  reduc_smin_optab = init_optab (UNKNOWN);
  reduc_umin_optab = init_optab (UNKNOWN);
  reduc_splus_optab = init_optab (UNKNOWN);
  reduc_uplus_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_shl_optab = init_optab (UNKNOWN);
  vec_shr_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);

  powi_optab = init_optab (UNKNOWN);

  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();
  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs,
     signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  init_floating_libfuncs (powi_optab, "powi", '2');

  init_interclass_conv_libfuncs (sfloat_optab, "float",
                                 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
                                 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
                                 MODE_FLOAT, MODE_INT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        struct optab_handlers *h;

        o = optab_table[i];
        h = &o->handlers[j];
        if (h->libfunc)
          {
            gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (h->libfunc, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) COI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          struct optab_handlers *h;

          o = &convert_optab_table[i];
          h = &o->handlers[j][k];
          if (h->libfunc)
            {
              gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (h->libfunc, 0));
            }
        }
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}
/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in mode VMODE.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
                                   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them in reg, if required.  */
  rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
                         NULL_RTX, VOIDmode, 1);
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
                         NULL_RTX, VOIDmode, 1);
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit instruction!  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
                              comparison, cc_op0, cc_op1));

  return target;
}
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
                               rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;
  emit_insn (insn);

  return target;
}
/* Expand a compare-and-swap operation and return its value.  */

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
        break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
         against an old value.  */
      if (MEM_P (old_val))
        old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
                                                 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
        return NULL_RTX;

      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      rtx insn;

      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
        {
          enum machine_mode cmode = insn_data[icode].operand[0].mode;

          subtarget = target;
          if (!insn_data[icode].operand[0].predicate (target, cmode))
            subtarget = gen_reg_rtx (cmode);

          insn = GEN_FCN (icode) (subtarget);
          if (insn)
            {
              emit_insn (insn);
              if (GET_MODE (target) != GET_MODE (subtarget))
                {
                  convert_move (target, subtarget, 1);
                  subtarget = target;
                }
              return subtarget;
            }
        }
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
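
/* An illustrative sketch, not part of GCC itself: the boolean flavour
   expanded above is what backs GCC's __sync_bool_compare_and_swap builtin,
   which yields 1 on a successful exchange and 0 otherwise.  Kept under
   #if 0 so it does not affect the build.  */
#if 0
static int
try_claim (int *slot, int expected, int desired)
{
  /* Nonzero iff *slot was equal to EXPECTED and now holds DESIRED.  */
  return __sync_bool_compare_and_swap (slot, expected, desired);
}
#endif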
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        cmp_reg = compare-and-swap(mem, old_reg, new_reg)
        if (cmp_reg != old_reg)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget != NULL_RTX)
        {
          gcc_assert (subtarget == cmp_reg);
          break;
        }

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
        return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
                                                 cmp_reg, icode);
      if (subtarget == NULL_RTX)
        return false;
      if (subtarget != cmp_reg)
        emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
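
/* An illustrative sketch, not part of GCC itself: the loop shape described
   above, written with GCC's __sync_val_compare_and_swap builtin.  Here the
   "seq" step is a plain OR computation.  Kept under #if 0 so it does not
   affect the build.  */
#if 0
static int
atomic_or_loop (int *mem, int val)
{
  int cmp = *mem;               /* the plain load, done once */
  for (;;)
    {
      int old = cmp;
      int new_val = old | val;  /* the SEQ computation */
      cmp = __sync_val_compare_and_swap (mem, old, new_val);
      if (cmp == old)
        return old;             /* swap succeeded */
      /* otherwise CMP holds the value the CAS loaded; retry */
    }
}
#endif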
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      if (icode == CODE_FOR_nothing)
        {
          icode = sync_add_optab[mode];
          if (icode != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
        {
          emit_insn (insn);
          return const0_rtx;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return const0_rtx;
    }

  return NULL_RTX;
}
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
                             bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
        {
          old_code = sync_old_add_optab[mode];
          new_code = sync_new_add_optab[mode];
          if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
            {
              val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
              code = PLUS;
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does supports the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
        {
          icode = old_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }
  else
    {
      icode = old_code;
      if (icode == CODE_FOR_nothing
          && (code == PLUS || code == MINUS || code == XOR))
        {
          icode = new_code;
          if (icode != CODE_FOR_nothing)
            compensate = true;
        }
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);

          /* If we need to compensate for using an operation with the
             wrong return value, do so now.  */
          if (compensate)
            {
              if (!after)
                {
                  if (code == PLUS)
                    code = MINUS;
                  else if (code == MINUS)
                    code = PLUS;
                }

              if (code == NOT)
                target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
              target = expand_simple_binop (mode, code, target, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
            }

          return target;
        }
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);

      start_sequence ();

      t1 = t0;
      if (!after)
        emit_move_insn (target, t0);
      if (code == NOT)
        {
          t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
          code = AND;
        }
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
                                true, OPTAB_LIB_WIDEN);
      if (after)
        emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
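
/* An illustrative sketch, not part of GCC itself: the before/after
   distinction and the compensation step described above, in terms of GCC's
   __sync builtins.  Each call below performs the addition, so the example
   adds VAL twice; it is only meant to show the two result flavours.  Kept
   under #if 0 so it does not affect the build.  */
#if 0
static void
fetch_op_example (int *mem, int val, int *before, int *after)
{
  *before = __sync_fetch_and_add (mem, val);   /* value MEM held before */
  *after  = __sync_add_and_fetch (mem, val);   /* value MEM holds after */
  /* Compensation when only the old-value pattern exists:
       new = old + val;
     and, since PLUS is reversible, old can be recovered from new:
       old = new - val;  */
}
#endif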
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value
   stored in MEM is target defined.  TARGET is an optional place to stick
   the return value.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = sync_lock_test_and_set[mode];
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
        target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
        val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
        {
          emit_insn (insn);
          return target;
        }
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
        val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
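
/* An illustrative sketch, not part of GCC itself: the builtin this expander
   backs is __sync_lock_test_and_set, typically used with the constant 1 as
   an acquire-only spinlock and paired with __sync_lock_release.  Kept under
   #if 0 so it does not affect the build.  */
#if 0
static void
spinlock_example (volatile int *lock)
{
  while (__sync_lock_test_and_set ((int *) lock, 1))
    ;                           /* spin until the previous value was 0 */
  /* ... critical section ... */
  __sync_lock_release ((int *) lock);
}
#endif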
#include "gt-optabs.h"