/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
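/* For example, when a PLUS is expanded into several insns, the last insn
   that sets TARGET receives a note of the form (REG_EQUAL (plus OP0 OP1)),
   which later passes such as CSE can exploit.  (The PLUS case is only an
   illustration; any code handled below works the same way.)  */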
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

      || NEXT_INSN (insns) == NULL_RTX)

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;
          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
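/* For instance, widening a QImode operand to SImode for an AND can leave the
   upper 24 bits of the widened register undefined, since only the low 8 bits
   of the result are used; widening it for a LSHIFTRT cannot, because the
   undefined bits would be shifted into the part that is kept.  */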
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, tree type)
{
      return one_cmpl_optab;

      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_STORE_EXPR:
      return vec_realign_store_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

      trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);

      return trapv ? addv_optab : add_optab;

      return trapv ? subv_optab : sub_optab;

      return trapv ? smulv_optab : smul_optab;

      return trapv ? negv_optab : neg_optab;

      return trapv ? absv_optab : abs_optab;
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  if (ternary_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
    abort ();

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0) : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1) : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2) : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (! (*insn_data[icode].operand[3].predicate) (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);
  else
    return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
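/* A minimal illustration of the superword case, assuming 32-bit words and a
   logical right shift of a 64-bit value; the helper below is only a sketch
   of the transformation, not part of the expander itself.  */
#if 0
#include <stdint.h>

static uint64_t
example_superword_lshr (uint32_t outof_input, uint32_t into_input,
                        unsigned int count /* 32 <= count < 64 */)
{
  /* SUPERWORD_OP1 corresponds to count - 32.  */
  uint32_t into = outof_input >> (count - 32);
  /* A logical shift fills the out-of half with zeros; an arithmetic right
     shift would fill it with copies of the sign bit instead.  */
  uint32_t outof = 0;
  (void) into_input;            /* The low input word is discarded.  */
  return ((uint64_t) outof << 32) | into;
}
#endif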
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
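/* A matching illustration of the subword case, again assuming 32-bit words
   and a logical right shift with 0 < count < 32; only a sketch.  */
#if 0
static uint64_t
example_subword_lshr (uint32_t outof_input, uint32_t into_input,
                      unsigned int count /* 0 < count < 32 */)
{
  /* Bits that cross from the out-of word into the into word.  */
  uint32_t carries = outof_input << (32 - count);
  uint32_t into_target = (into_input >> count) | carries;
  /* The out-of half is just an ordinary single-word shift.  */
  uint32_t outof_target = outof_input >> count;
  return ((uint64_t) outof_target << 32) | into_target;
}
#endif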
static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
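/* Putting the two cases together, again for 32-bit words and a logical
   right shift; the helpers referred to are the sketches above.  */
#if 0
static uint64_t
example_doubleword_lshr (uint64_t x, unsigned int count /* 0 < count < 64 */)
{
  uint32_t outof_input = (uint32_t) (x >> 32);
  uint32_t into_input = (uint32_t) x;
  if (count >= 32)
    return example_superword_lshr (outof_input, into_input, count);
  else
    return example_subword_lshr (outof_input, into_input, count);
}
#endif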
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);

      superword_op1 = cmp1;
    }

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
    {
      op0 = force_not_mem (op0);
      op1 = op0;
    }
  else
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (((target == 0 || REG_P (target))
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
    }

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
          && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
        {
          rtx tmp;

          tmp = op0; op0 = op1; op1 = tmp;
          tmp = xop0; xop0 = xop1; xop1 = tmp;
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0) : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1) : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }
965 takes operands of this mode and makes a wider mode. */
967 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
968 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
969 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
970 != CODE_FOR_nothing
))
972 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
973 unsignedp
? umul_widen_optab
: smul_widen_optab
,
974 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
978 if (GET_MODE_CLASS (mode
) == MODE_INT
)
979 return gen_lowpart (mode
, temp
);
981 return convert_to_mode (mode
, temp
, unsignedp
);
985 /* Look for a wider mode of the same class for which we think we
986 can open-code the operation. Check for a widening multiply at the
987 wider mode as well. */
989 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
990 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
991 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
992 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
994 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
995 || (binoptab
== smul_optab
996 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
997 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
998 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
999 != CODE_FOR_nothing
)))
1001 rtx xop0
= op0
, xop1
= op1
;
1004 /* For certain integer operations, we need not actually extend
1005 the narrow operands, as long as we will truncate
1006 the results to the same narrowness. */
1008 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1009 || binoptab
== xor_optab
1010 || binoptab
== add_optab
|| binoptab
== sub_optab
1011 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1012 && class == MODE_INT
)
1015 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1017 /* The second operand of a shift must always be extended. */
1018 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1019 no_extend
&& binoptab
!= ashl_optab
);
1021 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1022 unsignedp
, OPTAB_DIRECT
);
1025 if (class != MODE_INT
)
1028 target
= gen_reg_rtx (mode
);
1029 convert_move (target
, temp
, 0);
1033 return gen_lowpart (mode
, temp
);
1036 delete_insns_since (last
);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);
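          /* Worked example (assuming 32-bit words): rotating a 64-bit value
             left by 40 is the same as rotating it right by 24, so each output
             word is (one input word << 8) | (other input word >> 24);
             FIRST_SHIFT_COUNT is then 40 - 32 = 8 and SECOND_SHIFT_COUNT is
             2*32 - 40 = 24.  */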
          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
      else
        equiv_value = 0;

      /* We can't make this a no conflict block if this is a word swap,
         because the word swap case fails if the input and output values
         are in the same register.  */
      if (shift_count != BITS_PER_WORD)
        emit_no_conflict_block (insns, target, op0, op1, equiv_value);
    }

  /* These can be done a word at a time by propagating carries.  */
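  /* Worked example (assuming 32-bit words): adding the DImode values
     0x00000001ffffffff and 0x0000000000000001 proceeds as
       low word:  0xffffffff + 0x00000001 = 0x00000000, carry out = 1;
       high word: 0x00000001 + 0x00000000 + carry in = 0x00000002;
     giving 0x0000000200000000.  */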
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x, newx;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

          /* Store carry from main add/subtract.  */
          carry_out = gen_reg_rtx (word_mode);
          carry_out = emit_store_flag_force (carry_out,
                                             (binoptab == add_optab
                                              ? LT : GT),
                                             x, op0_piece,
                                             word_mode, 1, normalizep);

          /* Add/subtract previous carry to main result.  */
          newx = expand_binop (word_mode,
                               normalizep == 1 ? binoptab : otheroptab,
                               x, carry_in,
                               NULL_RTX, 1, next_methods);

          /* Get out carry from adding/subtracting carry in.  */
          rtx carry_tmp = gen_reg_rtx (word_mode);
          carry_tmp = emit_store_flag_force (carry_tmp,
                                             (binoptab == add_optab
                                              ? LT : GT),
                                             newx, x,
                                             word_mode, 1, normalizep);

          /* Logical-ior the two poss. carry together.  */
          carry_out = expand_binop (word_mode, ior_optab,
                                    carry_out, carry_tmp,
                                    carry_out, 0, next_methods);

          emit_move_insn (target_piece, newx);

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }

          return target;
        }

      delete_insns_since (last);
    }

  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
                           _______________________
                          [__op0_high_|__op0_low__]
                           _______________________
        *                 [__op1_high_|__op1_low__]
        _______________________________________________
                           _______________________
    (1)                   [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
               _______________________
    (3)       [__op0_high_*__op1_high_]


    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
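  /* As a worked example (assuming 32-bit words): with A = a1*2**32 + a0 and
     B = b1*2**32 + b0,

       A*B mod 2**64 = a0*b0 + ((a0*b1 + a1*b0) mod 2**32) * 2**32,

     so only the widening product a0*b0 and the two non-widening products
     a0*b1 and a1*b0 are needed; a1*b1 only contributes to the discarded
     upper half.  */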
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))
    {
      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);
      rtx product = 0;
      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && !REG_P (target)))
        target = 0;

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */
          if (product == 0)
            delete_insns_since (last);
          else
            op0_xhigh = op0_high, op1_xhigh = op1_high;
        }

      if (product == 0
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);
          if (op0_xhigh)
            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);
          else
            {
              op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                        NULL_RTX, 0, next_methods);
              if (op0_xhigh)
                op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                          op0_xhigh, op0_xhigh, 0,
                                          next_methods);
            }

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);
          if (op1_xhigh)
            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);
          else
            {
              op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                        NULL_RTX, 0, next_methods);
              if (op1_xhigh)
                op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                          op1_xhigh, op1_xhigh, 0,
                                          next_methods);
            }
        }

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)
        {
          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

          if (!REG_P (product_high))
            product_high = force_reg (word_mode, product_high);

          if (temp != 0)
            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          if (temp != 0)
            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

          if (temp != 0)
            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,
                                 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          emit_move_insn (operand_subword (product, high, 1, mode),
                          product_high);

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }

          return product;
        }

      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         the meantime.  */
      delete_insns_since (last);
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
        {
          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
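/* For instance, an unsigned QImode division can be done by zero-extending
   both operands to HImode and using a signed HImode divide: the widened
   operands are non-negative and fit in HImode, so the narrowed quotient is
   the same either way.  */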
rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
}
/* Generate code to perform an operation specified by UNOPPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = force_not_mem (op0);

  if (targ0 == 0)
    targ0 = gen_reg_rtx (mode);
  if (targ1 == 0)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[2].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[1].predicate) (targ1, mode))
        abort ();

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = force_not_mem (op0);
  op1 = force_not_mem (op1);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (targ0 == 0)
    targ0 = gen_reg_rtx (mode);
  if (targ1 == 0)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0) : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1) : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
        abort ();

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
int
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  if (!((targ0 != NULL_RTX) ^ (targ1 != NULL_RTX)))
    abort ();

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return 0;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                    NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return 1;
}
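/* Illustrative sketch (hypothetical helpers, not part of this file): the
   return-value convention assumed by expand_twoval_binop_libfunc above.  A
   divmod-style library routine hands back a value of an integer mode twice
   as wide as MODE; the caller keeps only the half it wants, selected by
   byte offset (0 for TARG0, GET_MODE_SIZE (mode) for TARG1).  The host
   types below assume a 32-bit unsigned int and a 64-bit unsigned long long,
   with the first result in the low-order half as on a little-endian
   target.  */

static inline unsigned long long
sketch_pack_two_results (unsigned int first, unsigned int second)
{
  /* Pack both results into one double-width return value.  */
  return (unsigned long long) first | ((unsigned long long) second << 32);
}

static inline unsigned int
sketch_extract_result (unsigned long long packed, int want_second)
{
  /* Keep only the requested half, as the simplify_gen_subreg call does.  */
  return want_second ? (unsigned int) (packed >> 32) : (unsigned int) packed;
}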
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  if (unop == 0)
    abort ();

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
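/* Illustrative sketch (hypothetical, not part of this file): the identity
   widen_clz relies on, shown with the GCC builtins on a host where int is
   assumed to be 32 bits and long long 64 bits.  Counting leading zeros of
   the zero-extended value over-counts by exactly the difference in widths.
   Like the clz optab, the builtins are undefined for a zero argument.  */

static inline int
sketch_narrow_clz_via_wide (unsigned int x)
{
  /* (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow))  */
  return __builtin_clzll ((unsigned long long) x) - (64 - 32);
}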
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
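/* Illustrative sketch (hypothetical, not part of this file): the identity
   used by expand_parity above, (parity x) == (and (popcount x) 1), shown
   with the corresponding GCC builtin on a host int.  */

static inline int
sketch_parity_via_popcount (unsigned int x)
{
  return __builtin_popcount (x) & 1;   /* 1 if an odd number of bits is set */
}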
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
2146 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2149 enum mode_class
class;
2150 enum machine_mode wider_mode
;
2152 rtx last
= get_last_insn ();
2155 class = GET_MODE_CLASS (mode
);
2158 op0
= force_not_mem (op0
);
2160 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2162 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2163 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2169 temp
= gen_reg_rtx (mode
);
2171 if (GET_MODE (xop0
) != VOIDmode
2172 && GET_MODE (xop0
) != mode0
)
2173 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2175 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2177 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2178 xop0
= copy_to_mode_reg (mode0
, xop0
);
2180 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2181 temp
= gen_reg_rtx (mode
);
2183 pat
= GEN_FCN (icode
) (temp
, xop0
);
2186 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2187 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2189 delete_insns_since (last
);
2190 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2198 delete_insns_since (last
);
2201 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2203 /* Widening clz needs special treatment. */
2204 if (unoptab
== clz_optab
)
2206 temp
= widen_clz (mode
, op0
, target
);
2213 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2214 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2215 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2217 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2221 /* For certain operations, we need not actually extend
2222 the narrow operand, as long as we will truncate the
2223 results to the same narrowness. */
2225 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2226 (unoptab
== neg_optab
2227 || unoptab
== one_cmpl_optab
)
2228 && class == MODE_INT
);
2230 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2235 if (class != MODE_INT
)
2238 target
= gen_reg_rtx (mode
);
2239 convert_move (target
, temp
, 0);
2243 return gen_lowpart (mode
, temp
);
2246 delete_insns_since (last
);
2250 /* These can be done a word at a time. */
2251 if (unoptab
== one_cmpl_optab
2252 && class == MODE_INT
2253 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2254 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2259 if (target
== 0 || target
== op0
)
2260 target
= gen_reg_rtx (mode
);
2264 /* Do the actual arithmetic. */
2265 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2267 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2268 rtx x
= expand_unop (word_mode
, unoptab
,
2269 operand_subword_force (op0
, i
, mode
),
2270 target_piece
, unsignedp
);
2272 if (target_piece
!= x
)
2273 emit_move_insn (target_piece
, x
);
2276 insns
= get_insns ();
2279 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2280 gen_rtx_fmt_e (unoptab
->code
, mode
,
2285 /* Try negating floating point values by flipping the sign bit. */
2286 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2287 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2289 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2290 enum machine_mode imode
= int_mode_for_mode (mode
);
2291 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2293 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2295 HOST_WIDE_INT hi
, lo
;
2296 rtx last
= get_last_insn ();
2298 /* Handle targets with different FP word orders. */
2299 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2301 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2302 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2303 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2306 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2309 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2313 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2316 temp
= expand_binop (imode
, xor_optab
,
2317 gen_lowpart (imode
, op0
),
2318 immed_double_const (lo
, hi
, imode
),
2319 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2324 target
= gen_reg_rtx (mode
);
2325 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2326 set_unique_reg_note (insn
, REG_EQUAL
,
2327 gen_rtx_fmt_e (NEG
, mode
,
2331 delete_insns_since (last
);
2335 /* Try calculating parity (x) as popcount (x) % 2. */
2336 if (unoptab
== parity_optab
)
2338 temp
= expand_parity (mode
, op0
, target
);
2343 /* If there is no negation pattern, try subtracting from zero. */
2344 if (unoptab
== neg_optab
&& class == MODE_INT
)
2346 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2347 target
, unsignedp
, OPTAB_DIRECT
);
2353 /* Now try a library call in this mode. */
2354 if (unoptab
->handlers
[(int) mode
].libfunc
)
2358 enum machine_mode outmode
= mode
;
2360 /* All of these functions return small values. Thus we choose to
2361 have them return something that isn't a double-word. */
2362 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2363 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2365 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2369 /* Pass 1 for NO_QUEUE so we don't lose any increments
2370 if the libcall is cse'd or moved. */
2371 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2372 NULL_RTX
, LCT_CONST
, outmode
,
2374 insns
= get_insns ();
2377 target
= gen_reg_rtx (outmode
);
2378 emit_libcall_block (insns
, target
, value
,
2379 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2384 /* It can't be done in this mode. Can we do it in a wider mode? */
2386 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2388 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2389 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2391 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2392 != CODE_FOR_nothing
)
2393 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2397 /* For certain operations, we need not actually extend
2398 the narrow operand, as long as we will truncate the
2399 results to the same narrowness. */
2401 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2402 (unoptab
== neg_optab
2403 || unoptab
== one_cmpl_optab
)
2404 && class == MODE_INT
);
2406 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2409 /* If we are generating clz using wider mode, adjust the
2411 if (unoptab
== clz_optab
&& temp
!= 0)
2412 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2413 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2414 - GET_MODE_BITSIZE (mode
)),
2415 target
, true, OPTAB_DIRECT
);
2419 if (class != MODE_INT
)
2422 target
= gen_reg_rtx (mode
);
2423 convert_move (target
, temp
, 0);
2427 return gen_lowpart (mode
, temp
);
2430 delete_insns_since (last
);
2435 /* If there is no negate operation, try doing a subtract from zero.
2436 The US Software GOFAST library needs this. FIXME: This is *wrong*
2437 for floating-point operations due to negative zeros! */
2438 if (unoptab
->code
== NEG
)
2441 temp
= expand_binop (mode
,
2442 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2443 CONST0_RTX (mode
), op0
,
2444 target
, unsignedp
, OPTAB_LIB_WIDEN
);
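/* Illustrative sketch (hypothetical, not part of this file): the sign-bit
   trick expand_unop tries above for floating-point NEG, shown for a host
   'float' assumed to be a 32-bit IEEE single with a 32-bit unsigned int.
   Viewing the value in the corresponding integer mode and XORing the sign
   bit negates it, and, unlike the subtract-from-zero fallback criticized in
   the FIXME above, it also flips the sign of zeros correctly.  */

static inline float
sketch_fneg_via_signbit (float x)
{
  union { float f; unsigned int u; } v;   /* view the float in the integer mode */
  v.f = x;
  v.u ^= 0x80000000u;                     /* flip the IEEE sign bit */
  return v.f;
}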
2452 /* Emit code to compute the absolute value of OP0, with result to
2453 TARGET if convenient. (TARGET may be 0.) The return value says
2454 where the result actually is to be found.
2456 MODE is the mode of the operand; the mode of the result is
2457 different but can be deduced from MODE.
2462 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2463 int result_unsignedp
)
2468 result_unsignedp
= 1;
2470 /* First try to do it with a special abs instruction. */
2471 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2476 /* For floating point modes, try clearing the sign bit. */
2477 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2478 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2480 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2481 enum machine_mode imode
= int_mode_for_mode (mode
);
2482 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2484 if (imode
!= BLKmode
&& bitpos
>= 0)
2486 HOST_WIDE_INT hi
, lo
;
2487 rtx last
= get_last_insn ();
2489 /* Handle targets with different FP word orders. */
2490 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2492 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2493 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2494 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2497 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2500 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2504 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2507 temp
= expand_binop (imode
, and_optab
,
2508 gen_lowpart (imode
, op0
),
2509 immed_double_const (~lo
, ~hi
, imode
),
2510 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2515 target
= gen_reg_rtx (mode
);
2516 insn
= emit_move_insn (target
, gen_lowpart (mode
, temp
));
2517 set_unique_reg_note (insn
, REG_EQUAL
,
2518 gen_rtx_fmt_e (ABS
, mode
,
2522 delete_insns_since (last
);
2526 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2527 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2529 rtx last
= get_last_insn ();
2531 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2533 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2539 delete_insns_since (last
);
2542 /* If this machine has expensive jumps, we can do integer absolute
2543 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2544 where W is the width of MODE. */
2546 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2548 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2549 size_int (GET_MODE_BITSIZE (mode
) - 1),
2552 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2555 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2556 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
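/* Illustrative sketch (hypothetical, not part of this file): the branch-free
   absolute-value identity used above, for a host 'int' assumed to be 32 bits
   wide with an arithmetic right shift.  The shift yields 0 for nonnegative
   values and -1 for negative ones, so xor-then-subtract is either a no-op or
   a two's complement negation (and, like any such negation, it wraps for the
   most negative value).  */

static inline int
sketch_abs_via_shift (int x)
{
  int sign = x >> (32 - 1);   /* (signed) x >> (W-1): 0 or -1 */
  return (sign ^ x) - sign;   /* (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)) */
}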
2566 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2567 int result_unsignedp
, int safe
)
2572 result_unsignedp
= 1;
2574 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2578 /* If that does not win, use conditional jump and negate. */
2580 /* It is safe to use the target if it is the same
2581 as the source if this is also a pseudo register */
2582 if (op0
== target
&& REG_P (op0
)
2583 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2586 op1
= gen_label_rtx ();
2587 if (target
== 0 || ! safe
2588 || GET_MODE (target
) != mode
2589 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2591 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2592 target
= gen_reg_rtx (mode
);
2594 emit_move_insn (target
, op0
);
2597 /* If this mode is an integer too wide to compare properly,
2598 compare word by word. Rely on CSE to optimize constant cases. */
2599 if (GET_MODE_CLASS (mode
) == MODE_INT
2600 && ! can_compare_p (GE
, mode
, ccp_jump
))
2601 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2604 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2605 NULL_RTX
, NULL_RTX
, op1
);
2607 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2610 emit_move_insn (target
, op0
);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */
2623 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2626 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2631 /* Sign and zero extension from memory is often done specially on
2632 RISC machines, so forcing into a register here can pessimize
2634 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2635 op0
= force_not_mem (op0
);
2637 /* Now, if insn does not accept our operands, put them into pseudos. */
2639 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2640 op0
= copy_to_mode_reg (mode0
, op0
);
2642 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2643 || (flag_force_mem
&& MEM_P (temp
)))
2644 temp
= gen_reg_rtx (GET_MODE (temp
));
2646 pat
= GEN_FCN (icode
) (temp
, op0
);
2648 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2649 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2654 emit_move_insn (target
, temp
);
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
2686 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2688 rtx prev
, next
, first
, last
, insn
;
2690 if (!REG_P (target
) || reload_in_progress
)
2691 return emit_insn (insns
);
2693 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2694 if (!NONJUMP_INSN_P (insn
)
2695 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2696 return emit_insn (insns
);
2698 /* First emit all insns that do not store into words of the output and remove
2699 these from the list. */
2700 for (insn
= insns
; insn
; insn
= next
)
2705 next
= NEXT_INSN (insn
);
2707 /* Some ports (cris) create a libcall regions at their own. We must
2708 avoid any potential nesting of LIBCALLs. */
2709 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2710 remove_note (insn
, note
);
2711 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2712 remove_note (insn
, note
);
2714 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2715 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2716 set
= PATTERN (insn
);
2717 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2719 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2720 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2722 set
= XVECEXP (PATTERN (insn
), 0, i
);
2730 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2732 if (PREV_INSN (insn
))
2733 NEXT_INSN (PREV_INSN (insn
)) = next
;
2738 PREV_INSN (next
) = PREV_INSN (insn
);
2744 prev
= get_last_insn ();
2746 /* Now write the CLOBBER of the output, followed by the setting of each
2747 of the words, followed by the final copy. */
2748 if (target
!= op0
&& target
!= op1
)
2749 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2751 for (insn
= insns
; insn
; insn
= next
)
2753 next
= NEXT_INSN (insn
);
2756 if (op1
&& REG_P (op1
))
2757 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
2760 if (op0
&& REG_P (op0
))
2761 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
2765 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2766 != CODE_FOR_nothing
)
2768 last
= emit_move_insn (target
, target
);
2770 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
2774 last
= get_last_insn ();
2776 /* Remove any existing REG_EQUAL note from "last", or else it will
2777 be mistaken for a note referring to the full contents of the
2778 alleged libcall value when found together with the REG_RETVAL
2779 note added below. An existing note can come from an insn
2780 expansion at "last". */
2781 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2785 first
= get_insns ();
2787 first
= NEXT_INSN (prev
);
2789 /* Encapsulate the block so it gets manipulated as a unit. */
2790 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2792 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */
2823 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
2825 rtx final_dest
= target
;
2826 rtx prev
, next
, first
, last
, insn
;
2828 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
2829 into a MEM later. Protect the libcall block from this change. */
2830 if (! REG_P (target
) || REG_USERVAR_P (target
))
2831 target
= gen_reg_rtx (GET_MODE (target
));
2833 /* If we're using non-call exceptions, a libcall corresponding to an
2834 operation that may trap may also trap. */
2835 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
2837 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2840 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2842 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
2843 remove_note (insn
, note
);
2847 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
2848 reg note to indicate that this call cannot throw or execute a nonlocal
2849 goto (unless there is already a REG_EH_REGION note, in which case
2851 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2854 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2857 XEXP (note
, 0) = constm1_rtx
;
2859 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
2863 /* First emit all insns that set pseudos. Remove them from the list as
2864 we go. Avoid insns that set pseudos which were referenced in previous
2865 insns. These can be generated by move_by_pieces, for example,
2866 to update an address. Similarly, avoid insns that reference things
2867 set in previous insns. */
2869 for (insn
= insns
; insn
; insn
= next
)
2871 rtx set
= single_set (insn
);
2874 /* Some ports (cris) create a libcall regions at their own. We must
2875 avoid any potential nesting of LIBCALLs. */
2876 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2877 remove_note (insn
, note
);
2878 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2879 remove_note (insn
, note
);
2881 next
= NEXT_INSN (insn
);
2883 if (set
!= 0 && REG_P (SET_DEST (set
))
2884 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2886 || ((! INSN_P(insns
)
2887 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
2888 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2889 && ! modified_in_p (SET_SRC (set
), insns
)
2890 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2892 if (PREV_INSN (insn
))
2893 NEXT_INSN (PREV_INSN (insn
)) = next
;
2898 PREV_INSN (next
) = PREV_INSN (insn
);
2903 /* Some ports use a loop to copy large arguments onto the stack.
2904 Don't move anything outside such a loop. */
2909 prev
= get_last_insn ();
2911 /* Write the remaining insns followed by the final copy. */
2913 for (insn
= insns
; insn
; insn
= next
)
2915 next
= NEXT_INSN (insn
);
2920 last
= emit_move_insn (target
, result
);
2921 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2922 != CODE_FOR_nothing
)
2923 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
2926 /* Remove any existing REG_EQUAL note from "last", or else it will
2927 be mistaken for a note referring to the full contents of the
2928 libcall value when found together with the REG_RETVAL note added
2929 below. An existing note can come from an insn expansion at
2931 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2934 if (final_dest
!= target
)
2935 emit_move_insn (final_dest
, target
);
2938 first
= get_insns ();
2940 first
= NEXT_INSN (prev
);
2942 /* Encapsulate the block so it gets manipulated as a unit. */
2943 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
2945 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
2946 when the encapsulated region would not be in one basic block,
2947 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
2949 bool attach_libcall_retval_notes
= true;
2950 next
= NEXT_INSN (last
);
2951 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
2952 if (control_flow_insn_p (insn
))
2954 attach_libcall_retval_notes
= false;
2958 if (attach_libcall_retval_notes
)
2960 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2962 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */
int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }
      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */
3025 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3026 enum machine_mode
*pmode
, int *punsignedp
,
3027 enum can_compare_purpose purpose
)
3029 enum machine_mode mode
= *pmode
;
3030 rtx x
= *px
, y
= *py
;
3031 int unsignedp
= *punsignedp
;
3032 enum mode_class
class;
3034 class = GET_MODE_CLASS (mode
);
3036 /* They could both be VOIDmode if both args are immediate constants,
3037 but we should fold that at an earlier stage.
3038 With no special code here, this will call abort,
3039 reminding the programmer to implement such folding. */
3041 if (mode
!= BLKmode
&& flag_force_mem
)
3043 /* Load duplicate non-volatile operands once. */
3044 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3046 x
= force_not_mem (x
);
3051 x
= force_not_mem (x
);
3052 y
= force_not_mem (y
);
3056 /* If we are inside an appropriately-short loop and we are optimizing,
3057 force expensive constants into a register. */
3058 if (CONSTANT_P (x
) && optimize
3059 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3060 x
= force_reg (mode
, x
);
3062 if (CONSTANT_P (y
) && optimize
3063 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3064 y
= force_reg (mode
, y
);
3067 /* Abort if we have a non-canonical comparison. The RTL documentation
3068 states that canonical comparisons are required only for targets which
3070 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3074 /* Don't let both operands fail to indicate the mode. */
3075 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3076 x
= force_reg (mode
, x
);
3078 /* Handle all BLKmode compares. */
3080 if (mode
== BLKmode
)
3082 enum machine_mode cmp_mode
, result_mode
;
3083 enum insn_code cmp_code
;
3088 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3093 /* Try to use a memory block compare insn - either cmpstr
3094 or cmpmem will do. */
3095 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3096 cmp_mode
!= VOIDmode
;
3097 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3099 cmp_code
= cmpmem_optab
[cmp_mode
];
3100 if (cmp_code
== CODE_FOR_nothing
)
3101 cmp_code
= cmpstr_optab
[cmp_mode
];
3102 if (cmp_code
== CODE_FOR_nothing
)
3105 /* Must make sure the size fits the insn's mode. */
3106 if ((GET_CODE (size
) == CONST_INT
3107 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3108 || (GET_MODE_BITSIZE (GET_MODE (size
))
3109 > GET_MODE_BITSIZE (cmp_mode
)))
3112 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3113 result
= gen_reg_rtx (result_mode
);
3114 size
= convert_to_mode (cmp_mode
, size
, 1);
3115 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3119 *pmode
= result_mode
;
3123 /* Otherwise call a library function, memcmp. */
3124 libfunc
= memcmp_libfunc
;
3125 length_type
= sizetype
;
3126 result_mode
= TYPE_MODE (integer_type_node
);
3127 cmp_mode
= TYPE_MODE (length_type
);
3128 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3129 TYPE_UNSIGNED (length_type
));
3131 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3138 *pmode
= result_mode
;
3142 /* Don't allow operands to the compare to trap, as that can put the
3143 compare and branch in different basic blocks. */
3144 if (flag_non_call_exceptions
)
3147 x
= force_reg (mode
, x
);
3149 y
= force_reg (mode
, y
);
3154 if (can_compare_p (*pcomparison
, mode
, purpose
))
3157 /* Handle a lib call just for the mode we are using. */
3159 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3161 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3164 /* If we want unsigned, and this mode has a distinct unsigned
3165 comparison routine, use that. */
3166 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3167 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3169 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3170 word_mode
, 2, x
, mode
, y
, mode
);
3174 if (TARGET_LIB_INT_CMP_BIASED
)
3175 /* Integer comparison returns a result that must be compared
3176 against 1, so that even if we do an unsigned compare
3177 afterward, there is still a value that can represent the
3178 result "less than". */
3188 if (class == MODE_FLOAT
)
3189 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
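/* Illustrative sketch (hypothetical, not part of this file): the memcmp
   fallback taken above for BLKmode operands when no cmpmem/cmpstr pattern
   fits -- the library call yields an int whose sign carries the result, so
   the caller then compares that value against zero.  memcmp is assumed to be
   declared by the system headers this file already includes.  */

static inline int
sketch_blkmode_less_than (const void *x, const void *y, unsigned long size)
{
  return memcmp (x, y, size) < 0;
}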
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */
rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */
3224 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3225 enum rtx_code comparison
, int unsignedp
, rtx label
)
3227 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3228 enum mode_class
class = GET_MODE_CLASS (mode
);
3229 enum machine_mode wider_mode
= mode
;
3231 /* Try combined insns first. */
3234 enum insn_code icode
;
3235 PUT_MODE (test
, wider_mode
);
3239 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3241 if (icode
!= CODE_FOR_nothing
3242 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3244 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3245 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3246 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3251 /* Handle some compares against zero. */
3252 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3253 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3255 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3256 emit_insn (GEN_FCN (icode
) (x
));
3258 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3262 /* Handle compares for which there is a directly suitable insn. */
3264 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3265 if (icode
!= CODE_FOR_nothing
)
3267 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3268 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3269 emit_insn (GEN_FCN (icode
) (x
, y
));
3271 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3275 if (class != MODE_INT
&& class != MODE_FLOAT
3276 && class != MODE_COMPLEX_FLOAT
)
3279 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3281 while (wider_mode
!= VOIDmode
);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */
void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
         is out of sync.  */
      if (! label)
        abort ();

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical
     RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */
void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
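/* Illustrative sketch (hypothetical, not part of this file): the operand and
   condition swap described above, on host integers.  A non-canonical test
   such as 5 < y is emitted as y > 5: swapping the operands of a comparison
   requires swapping the condition (LT <-> GT), which is what swap_condition
   does for rtx codes -- it is not the same as inverting it.  */

static inline int
sketch_swapped_less_than (int constant_op, int variable_op)
{
  /* (lt (const_int 5) (reg y)) is canonicalized to (gt (reg y) (const_int 5)).  */
  return variable_op > constant_op;
}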
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
3350 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3351 enum machine_mode
*pmode
, int *punsignedp
)
3353 enum rtx_code comparison
= *pcomparison
;
3354 enum rtx_code swapped
= swap_condition (comparison
);
3355 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3358 enum machine_mode orig_mode
= GET_MODE (x
);
3359 enum machine_mode mode
;
3360 rtx value
, target
, insns
, equiv
;
3362 bool reversed_p
= false;
3364 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3366 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3369 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3372 tmp
= x
; x
= y
; y
= tmp
;
3373 comparison
= swapped
;
3377 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3378 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3380 comparison
= reversed
;
3386 if (mode
== VOIDmode
)
3389 if (mode
!= orig_mode
)
3391 x
= convert_to_mode (mode
, x
, 0);
3392 y
= convert_to_mode (mode
, y
, 0);
3395 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3396 the RTL. The allows the RTL optimizers to delete the libcall if the
3397 condition can be determined at compile-time. */
3398 if (comparison
== UNORDERED
)
3400 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3401 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3402 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3403 temp
, const_true_rtx
, equiv
);
3407 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3408 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3410 rtx true_rtx
, false_rtx
;
3415 true_rtx
= const0_rtx
;
3416 false_rtx
= const_true_rtx
;
3420 true_rtx
= const_true_rtx
;
3421 false_rtx
= const0_rtx
;
3425 true_rtx
= const1_rtx
;
3426 false_rtx
= const0_rtx
;
3430 true_rtx
= const0_rtx
;
3431 false_rtx
= constm1_rtx
;
3435 true_rtx
= constm1_rtx
;
3436 false_rtx
= const0_rtx
;
3440 true_rtx
= const0_rtx
;
3441 false_rtx
= const1_rtx
;
3447 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3448 equiv
, true_rtx
, false_rtx
);
3453 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3454 word_mode
, 2, x
, mode
, y
, mode
);
3455 insns
= get_insns ();
3458 target
= gen_reg_rtx (word_mode
);
3459 emit_libcall_block (insns
, target
, value
, equiv
);
3461 if (comparison
== UNORDERED
3462 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3463 comparison
= reversed_p
? EQ
: NE
;
3468 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */
void
emit_indirect_jump (rtx loc)
{
  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
         (loc, Pmode)))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
3485 #ifdef HAVE_conditional_move
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
3502 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3503 enum machine_mode cmode
, rtx op2
, rtx op3
,
3504 enum machine_mode mode
, int unsignedp
)
3506 rtx tem
, subtarget
, comparison
, insn
;
3507 enum insn_code icode
;
3508 enum rtx_code reversed
;
3510 /* If one operand is constant, make it the second one. Only do this
3511 if the other operand is not constant as well. */
3513 if (swap_commutative_operands_p (op0
, op1
))
3518 code
= swap_condition (code
);
3521 /* get_condition will prefer to generate LT and GT even if the old
3522 comparison was against zero, so undo that canonicalization here since
3523 comparisons against zero are cheaper. */
3524 if (code
== LT
&& op1
== const1_rtx
)
3525 code
= LE
, op1
= const0_rtx
;
3526 else if (code
== GT
&& op1
== constm1_rtx
)
3527 code
= GE
, op1
= const0_rtx
;
3529 if (cmode
== VOIDmode
)
3530 cmode
= GET_MODE (op0
);
3532 if (swap_commutative_operands_p (op2
, op3
)
3533 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3542 if (mode
== VOIDmode
)
3543 mode
= GET_MODE (op2
);
3545 icode
= movcc_gen_code
[mode
];
3547 if (icode
== CODE_FOR_nothing
)
3552 op2
= force_not_mem (op2
);
3553 op3
= force_not_mem (op3
);
3557 target
= gen_reg_rtx (mode
);
3561 /* If the insn doesn't accept these operands, put them in pseudos. */
3563 if (! (*insn_data
[icode
].operand
[0].predicate
)
3564 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3565 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3567 if (! (*insn_data
[icode
].operand
[2].predicate
)
3568 (op2
, insn_data
[icode
].operand
[2].mode
))
3569 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3571 if (! (*insn_data
[icode
].operand
[3].predicate
)
3572 (op3
, insn_data
[icode
].operand
[3].mode
))
3573 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3575 /* Everything should now be in the suitable form, so emit the compare insn
3576 and then the conditional move. */
3579 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3581 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3582 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3583 return NULL and let the caller figure out how best to deal with this
3585 if (GET_CODE (comparison
) != code
)
3588 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3590 /* If that failed, then give up. */
3596 if (subtarget
!= target
)
3597 convert_move (target
, subtarget
, 0);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
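/* Illustrative sketch (hypothetical, not part of this file): the scalar
   contract implemented by emit_conditional_move above when a movcc pattern
   exists -- TARGET receives OP2 if the comparison of OP0 and OP1 holds and
   OP3 otherwise -- shown for host integers with an LT comparison.  */

static inline int
sketch_conditional_move (int op0, int op1, int op2, int op3)
{
  return (op0 < op1) ? op2 : op3;   /* no branch is needed on a cmov target */
}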
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
3636 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3637 enum machine_mode cmode
, rtx op2
, rtx op3
,
3638 enum machine_mode mode
, int unsignedp
)
3640 rtx tem
, subtarget
, comparison
, insn
;
3641 enum insn_code icode
;
3642 enum rtx_code reversed
;
3644 /* If one operand is constant, make it the second one. Only do this
3645 if the other operand is not constant as well. */
3647 if (swap_commutative_operands_p (op0
, op1
))
3652 code
= swap_condition (code
);
3655 /* get_condition will prefer to generate LT and GT even if the old
3656 comparison was against zero, so undo that canonicalization here since
3657 comparisons against zero are cheaper. */
3658 if (code
== LT
&& op1
== const1_rtx
)
3659 code
= LE
, op1
= const0_rtx
;
3660 else if (code
== GT
&& op1
== constm1_rtx
)
3661 code
= GE
, op1
= const0_rtx
;
3663 if (cmode
== VOIDmode
)
3664 cmode
= GET_MODE (op0
);
3666 if (swap_commutative_operands_p (op2
, op3
)
3667 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3676 if (mode
== VOIDmode
)
3677 mode
= GET_MODE (op2
);
3679 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3681 if (icode
== CODE_FOR_nothing
)
3686 op2
= force_not_mem (op2
);
3687 op3
= force_not_mem (op3
);
3691 target
= gen_reg_rtx (mode
);
3693 /* If the insn doesn't accept these operands, put them in pseudos. */
3695 if (! (*insn_data
[icode
].operand
[0].predicate
)
3696 (target
, insn_data
[icode
].operand
[0].mode
))
3697 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3701 if (! (*insn_data
[icode
].operand
[2].predicate
)
3702 (op2
, insn_data
[icode
].operand
[2].mode
))
3703 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3705 if (! (*insn_data
[icode
].operand
[3].predicate
)
3706 (op3
, insn_data
[icode
].operand
[3].mode
))
3707 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3709 /* Everything should now be in the suitable form, so emit the compare insn
3710 and then the conditional move. */
3713 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3715 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3716 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3717 return NULL and let the caller figure out how best to deal with this
3719 if (GET_CODE (comparison
) != code
)
3722 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3724 /* If that failed, then give up. */
3730 if (subtarget
!= target
)
3731 convert_move (target
, subtarget
, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */
rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */
rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */
rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */
rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */
rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */
enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */
rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
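/* Illustrative sketch (hypothetical, not part of this file): the unsigned
   correction performed by expand_float below when only a signed
   integer-to-float conversion is available.  The operand is converted as a
   signed value and, if its sign bit was set, 2**bitwidth is added back.
   Host types assume a 64-bit long long with two's complement wrap-around
   and an IEEE double; the double-rounding guard expand_float also applies
   is omitted here.  */

static inline double
sketch_unsigned_to_double (unsigned long long x)
{
  double d = (double) (long long) x;    /* convert as signed */

  if ((long long) x < 0)                /* the sign bit was set */
    d += 18446744073709551616.0;        /* correct by 2**64 */
  return d;
}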
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */
3963 expand_float (rtx to
, rtx from
, int unsignedp
)
3965 enum insn_code icode
;
3967 enum machine_mode fmode
, imode
;
3969 /* Crash now, because we won't be able to decide which mode to use. */
3970 if (GET_MODE (from
) == VOIDmode
)
3973 /* Look for an insn to do the conversion. Do it in the specified
3974 modes if possible; otherwise convert either input, output or both to
3975 wider mode. If the integer mode is wider than the mode of FROM,
3976 we can do the conversion signed even if the input is unsigned. */
3978 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3979 fmode
= GET_MODE_WIDER_MODE (fmode
))
3980 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
3981 imode
= GET_MODE_WIDER_MODE (imode
))
3983 int doing_unsigned
= unsignedp
;
3985 if (fmode
!= GET_MODE (to
)
3986 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
3989 icode
= can_float_p (fmode
, imode
, unsignedp
);
3990 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
3991 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
3993 if (icode
!= CODE_FOR_nothing
)
3995 if (imode
!= GET_MODE (from
))
3996 from
= convert_to_mode (imode
, from
, unsignedp
);
3998 if (fmode
!= GET_MODE (to
))
3999 target
= gen_reg_rtx (fmode
);
4001 emit_unop_insn (icode
, target
, from
,
4002 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4005 convert_move (to
, target
, 0);
  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      if (flag_force_mem)
        from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      if (flag_force_mem)
        from = force_not_mem (from);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
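/* Worked example of the unsigned correction above (illustrative): with a
   32-bit FROM, the input 0xFFFFFFFF is first converted as the signed value
   -1, giving -1.0; since FROM was negative, 2**32 is then added, yielding
   the expected 4294967295.0.  */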
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For other inputs overflow happens and the result is
     undefined.)  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (flag_force_mem)
            from = force_not_mem (from);

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;
      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;

      if (flag_force_mem)
        from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
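/* Worked example of the unsigned fix-up above (illustrative): converting
   the DFmode value 2^63 + 4096 to a 64-bit unsigned integer, the input
   compares >= 2^63, so 2^63 is subtracted (giving 4096.0), the signed
   "fix" produces 4096, and XORing in the 2^63 bit gives the final result
   0x8000000000001000.  */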
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
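/* Usage sketch (illustrative only): callers elsewhere in the compiler ask
   questions such as

     if (have_insn_for (ROTATE, SImode))
       ... expand the rotate with the named pattern ...

   i.e. whether the optab registered for ROTATE has an SImode handler; the
   code/mode pair here is only an example.  */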
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }

  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */

static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                              p - libfunc_name));
    }
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class from_class,
                               enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
         tmode = GET_MODE_WIDER_MODE (tmode))
      {
        fname = GET_MODE_NAME (fmode);
        tname = GET_MODE_NAME (tmode);

        p = suffix;
        for (q = fname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = tname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p = '\0';

        tab->handlers[tmode][fmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */

static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
         wmode = GET_MODE_WIDER_MODE (wmode))
      {
        nname = GET_MODE_NAME (nmode);
        wname = GET_MODE_NAME (wmode);

        p = suffix;
        for (q = widening ? nname : wname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = widening ? wname : nname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p++ = '2';
        *p = '\0';

        tab->handlers[widening ? wmode : nmode]
                     [widening ? nmode : wmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}

/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
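/* Sketch of how a target might use the two entry points above (purely
   illustrative; the names are hypothetical, not taken from any real port).
   A targetm.init_libfuncs hook, run at the end of init_optabs below, could
   do

     static void
     example_init_libfuncs (void)
     {
       set_optab_libfunc (smod_optab, SImode, "__example_modsi3");
       set_conv_libfunc (sfloat_optab, DFmode, SImode, "__example_floatsidf");
     }

   to rename the default libcalls for those entries.  */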
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  drem_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      clrmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');
  /* Comparison libcalls for integers MUST come in pairs,
     signed and unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');
  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  init_interclass_conv_libfuncs (sfloat_optab, "float",
                                 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
                                 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
                                 MODE_FLOAT, MODE_INT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
                                            ? "_Unwind_SjLj_Resume"
                                            : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        struct optab_handlers *h;

        o = optab_table[i];
        h = &o->handlers[j];

        if (h->libfunc)
          {
            if (GET_CODE (h->libfunc) != SYMBOL_REF)
              abort ();
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (h->libfunc, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) CTI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          struct optab_handlers *h;

          o = &convert_optab_table[i];
          h = &o->handlers[j][k];

          if (h->libfunc)
            {
              if (GET_CODE (h->libfunc) != SYMBOL_REF)
                abort ();
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (h->libfunc, 0));
            }
        }
}
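/* Usage note (illustrative): debug_optab_libfuncs is a debugging aid
   rather than something the compiler calls; it can be invoked by hand
   from a debugger, for example "call debug_optab_libfuncs ()" under gdb,
   to inspect the libfunc tables set up above.  */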
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
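/* Usage sketch (illustrative only): a caller expanding a conditional trap
   might do

     rtx seq = gen_cond_trap (EQ, op0, op1, const0_rtx);
     if (seq)
       emit_insn (seq);

   where `op0' and `op1' are assumed to be operands of the same integer
   mode and const0_rtx stands in for the trap code.  A zero return means
   the target has no conditional trap pattern or the operands could not
   be prepared.  */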
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      abort ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  if (!COMPARISON_CLASS_P (cond))
    {
      /* This is unlikely.  While generating VEC_COND_EXPR,
         the auto-vectorizer ensures that the condition is a relational
         operation.  */
      abort ();
    }

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);

  if (!(*insn_data[icode].operand[4].predicate) (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!(*insn_data[icode].operand[5].predicate) (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
5116 static inline enum insn_code
5117 get_vcond_icode (tree expr
, enum machine_mode mode
)
5119 enum insn_code icode
= CODE_FOR_nothing
;
5121 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5122 icode
= vcondu_gen_code
[mode
];
5124 icode
= vcond_gen_code
[mode
];
5128 /* Return TRUE iff, appropriate vector insns are available
5129 for vector cond expr expr in VMODE mode. */
5132 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5134 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5139 /* Generate insns for VEC_COND_EXPR. */
5142 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5144 enum insn_code icode
;
5145 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5146 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5147 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5149 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5150 if (icode
== CODE_FOR_nothing
)
5154 target
= gen_reg_rtx (mode
);
5156 /* Get comparison rtx. First expand both cond expr operands. */
5157 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5159 cc_op0
= XEXP (comparison
, 0);
5160 cc_op1
= XEXP (comparison
, 1);
5161 /* Expand both operands and force them in reg, if required. */
5162 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5163 NULL_RTX
, VOIDmode
, 1);
5164 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5165 && mode
!= VOIDmode
)
5166 rtx_op1
= force_reg (mode
, rtx_op1
);
5168 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5169 NULL_RTX
, VOIDmode
, 1);
5170 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5171 && mode
!= VOIDmode
)
5172 rtx_op2
= force_reg (mode
, rtx_op2
);
5174 /* Emit instruction! */
5175 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5176 comparison
, cc_op0
, cc_op1
));
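/* Semantics sketch for the expander above (illustrative): for a four-element
   integer vector, VEC_COND_EXPR <a < b, v1, v2> selects, element by element,
   v1[i] where a[i] < b[i] and v2[i] otherwise; the vcond/vcondu pattern is
   handed the two value operands followed by the comparison code and its two
   operands, exactly as in the GEN_FCN call above.  */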
#include "gt-optabs.h"