1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
151 || NEXT_INSN (insns
) == NULL_RTX
)
154 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
155 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
156 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
157 && GET_RTX_CLASS (code
) != RTX_COMPARE
158 && GET_RTX_CLASS (code
) != RTX_UNARY
)
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
194 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
242 /* Return the optab used for computing the operation given by
243 the tree code, CODE. This function is not always usable (for
244 example, it cannot give complete results for multiplication
245 or division) but probably ought to be relied on more widely
246 throughout the expander. */
248 optab_for_tree_code (enum tree_code code
, tree type
)
260 return one_cmpl_optab
;
269 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
277 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
283 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
292 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
295 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
297 case REALIGN_STORE_EXPR
:
298 return vec_realign_store_optab
;
300 case REALIGN_LOAD_EXPR
:
301 return vec_realign_load_optab
;
307 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
311 return trapv
? addv_optab
: add_optab
;
314 return trapv
? subv_optab
: sub_optab
;
317 return trapv
? smulv_optab
: smul_optab
;
320 return trapv
? negv_optab
: neg_optab
;
323 return trapv
? absv_optab
: abs_optab
;
331 /* Generate code to perform an operation specified by TERNARY_OPTAB
332 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
334 UNSIGNEDP is for the case where we have to widen the operands
335 to perform the operation. It says to use zero-extension.
337 If TARGET is nonzero, the value
338 is generated there, if it is convenient to do so.
339 In all cases an rtx is returned for the locus of the value;
340 this may or may not be TARGET. */
343 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
344 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
346 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
347 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
348 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
349 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
352 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
354 if (ternary_optab
->handlers
[(int) mode
].insn_code
== CODE_FOR_nothing
)
358 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
359 temp
= gen_reg_rtx (mode
);
363 /* In case the insn wants input operands in modes different from
364 those of the actual operands, convert the operands. It would
365 seem that we don't need to convert CONST_INTs, but we do, so
366 that they're properly zero-extended, sign-extended or truncated
369 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
370 xop0
= convert_modes (mode0
,
371 GET_MODE (op0
) != VOIDmode
376 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
377 xop1
= convert_modes (mode1
,
378 GET_MODE (op1
) != VOIDmode
383 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
384 xop2
= convert_modes (mode2
,
385 GET_MODE (op2
) != VOIDmode
390 /* Now, if insn's predicates don't allow our operands, put them into
393 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
394 && mode0
!= VOIDmode
)
395 xop0
= copy_to_mode_reg (mode0
, xop0
);
397 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
398 && mode1
!= VOIDmode
)
399 xop1
= copy_to_mode_reg (mode1
, xop1
);
401 if (! (*insn_data
[icode
].operand
[3].predicate
) (xop2
, mode2
)
402 && mode2
!= VOIDmode
)
403 xop2
= copy_to_mode_reg (mode2
, xop2
);
405 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
412 /* Like expand_binop, but return a constant rtx if the result can be
413 calculated at compile time. The arguments and return value are
414 otherwise the same as for expand_binop. */
417 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
418 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
419 enum optab_methods methods
)
421 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
422 return simplify_gen_binary (binoptab
->code
, mode
, op0
, op1
);
424 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
427 /* Like simplify_expand_binop, but always put the result in TARGET.
428 Return true if the expansion succeeded. */
431 force_expand_binop (enum machine_mode mode
, optab binoptab
,
432 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
433 enum optab_methods methods
)
435 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
436 target
, unsignedp
, methods
);
440 emit_move_insn (target
, x
);
444 /* This subroutine of expand_doubleword_shift handles the cases in which
445 the effective shift value is >= BITS_PER_WORD. The arguments and return
446 value are the same as for the parent routine, except that SUPERWORD_OP1
447 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
448 INTO_TARGET may be null if the caller has decided to calculate it. */
451 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
452 rtx outof_target
, rtx into_target
,
453 int unsignedp
, enum optab_methods methods
)
455 if (into_target
!= 0)
456 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
457 into_target
, unsignedp
, methods
))
460 if (outof_target
!= 0)
462 /* For a signed right shift, we must fill OUTOF_TARGET with copies
463 of the sign bit, otherwise we must fill it with zeros. */
464 if (binoptab
!= ashr_optab
)
465 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
467 if (!force_expand_binop (word_mode
, binoptab
,
468 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
469 outof_target
, unsignedp
, methods
))
475 /* This subroutine of expand_doubleword_shift handles the cases in which
476 the effective shift value is < BITS_PER_WORD. The arguments and return
477 value are the same as for the parent routine. */
480 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
481 rtx outof_input
, rtx into_input
, rtx op1
,
482 rtx outof_target
, rtx into_target
,
483 int unsignedp
, enum optab_methods methods
,
484 unsigned HOST_WIDE_INT shift_mask
)
486 optab reverse_unsigned_shift
, unsigned_shift
;
489 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
490 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
492 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
493 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
494 the opposite direction to BINOPTAB. */
495 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
497 carries
= outof_input
;
498 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
499 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
504 /* We must avoid shifting by BITS_PER_WORD bits since that is either
505 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
506 has unknown behavior. Do a single shift first, then shift by the
507 remainder. It's OK to use ~OP1 as the remainder if shift counts
508 are truncated to the mode size. */
509 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
510 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
511 if (shift_mask
== BITS_PER_WORD
- 1)
513 tmp
= immed_double_const (-1, -1, op1_mode
);
514 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
519 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
520 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
524 if (tmp
== 0 || carries
== 0)
526 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
527 carries
, tmp
, 0, unsignedp
, methods
);
531 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
532 so the result can go directly into INTO_TARGET if convenient. */
533 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
534 into_target
, unsignedp
, methods
);
538 /* Now OR in the bits carried over from OUTOF_INPUT. */
539 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
540 into_target
, unsignedp
, methods
))
543 /* Use a standard word_mode shift for the out-of half. */
544 if (outof_target
!= 0)
545 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
546 outof_target
, unsignedp
, methods
))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
615 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
616 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
617 input operand; the shift moves bits in the direction OUTOF_INPUT->
618 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
619 of the target. OP1 is the shift count and OP1_MODE is its mode.
620 If OP1 is constant, it will have been truncated as appropriate
621 and is known to be nonzero.
623 If SHIFT_MASK is zero, the result of word shifts is undefined when the
624 shift count is outside the range [0, BITS_PER_WORD). This routine must
625 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
627 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
628 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
629 fill with zeros or sign bits as appropriate.
631 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
632 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
633 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
634 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
637 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
638 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
639 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
640 function wants to calculate it itself.
642 Return true if the shift could be successfully synthesized. */
645 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
646 rtx outof_input
, rtx into_input
, rtx op1
,
647 rtx outof_target
, rtx into_target
,
648 int unsignedp
, enum optab_methods methods
,
649 unsigned HOST_WIDE_INT shift_mask
)
651 rtx superword_op1
, tmp
, cmp1
, cmp2
;
652 rtx subword_label
, done_label
;
653 enum rtx_code cmp_code
;
655 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
656 fill the result with sign or zero bits as appropriate. If so, the value
657 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
658 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
659 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
661 This isn't worthwhile for constant shifts since the optimizers will
662 cope better with in-range shift counts. */
663 if (shift_mask
>= BITS_PER_WORD
665 && !CONSTANT_P (op1
))
667 if (!expand_doubleword_shift (op1_mode
, binoptab
,
668 outof_input
, into_input
, op1
,
670 unsignedp
, methods
, shift_mask
))
672 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
673 outof_target
, unsignedp
, methods
))
678 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
679 is true when the effective shift value is less than BITS_PER_WORD.
680 Set SUPERWORD_OP1 to the shift count that should be used to shift
681 OUTOF_INPUT into INTO_TARGET when the condition is false. */
682 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
683 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
685 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
686 is a subword shift count. */
687 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
689 cmp2
= CONST0_RTX (op1_mode
);
695 /* Set CMP1 to OP1 - BITS_PER_WORD. */
696 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
698 cmp2
= CONST0_RTX (op1_mode
);
700 superword_op1
= cmp1
;
705 /* If we can compute the condition at compile time, pick the
706 appropriate subroutine. */
707 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
708 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
710 if (tmp
== const0_rtx
)
711 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
712 outof_target
, into_target
,
715 return expand_subword_shift (op1_mode
, binoptab
,
716 outof_input
, into_input
, op1
,
717 outof_target
, into_target
,
718 unsignedp
, methods
, shift_mask
);
721 #ifdef HAVE_conditional_move
722 /* Try using conditional moves to generate straight-line code. */
724 rtx start
= get_last_insn ();
725 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
726 cmp_code
, cmp1
, cmp2
,
727 outof_input
, into_input
,
729 outof_target
, into_target
,
730 unsignedp
, methods
, shift_mask
))
732 delete_insns_since (start
);
736 /* As a last resort, use branches to select the correct alternative. */
737 subword_label
= gen_label_rtx ();
738 done_label
= gen_label_rtx ();
740 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
741 0, 0, subword_label
);
743 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
744 outof_target
, into_target
,
748 emit_jump_insn (gen_jump (done_label
));
750 emit_label (subword_label
);
752 if (!expand_subword_shift (op1_mode
, binoptab
,
753 outof_input
, into_input
, op1
,
754 outof_target
, into_target
,
755 unsignedp
, methods
, shift_mask
))
758 emit_label (done_label
);
762 /* Wrapper around expand_binop which takes an rtx code to specify
763 the operation to perform, not an optab pointer. All other
764 arguments are the same. */
766 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
767 rtx op1
, rtx target
, int unsignedp
,
768 enum optab_methods methods
)
770 optab binop
= code_to_optab
[(int) code
];
774 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
777 /* Generate code to perform an operation specified by BINOPTAB
778 on operands OP0 and OP1, with result having machine-mode MODE.
780 UNSIGNEDP is for the case where we have to widen the operands
781 to perform the operation. It says to use zero-extension.
783 If TARGET is nonzero, the value
784 is generated there, if it is convenient to do so.
785 In all cases an rtx is returned for the locus of the value;
786 this may or may not be TARGET. */
789 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
790 rtx target
, int unsignedp
, enum optab_methods methods
)
792 enum optab_methods next_methods
793 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
794 ? OPTAB_WIDEN
: methods
);
795 enum mode_class
class;
796 enum machine_mode wider_mode
;
798 int commutative_op
= 0;
799 int shift_op
= (binoptab
->code
== ASHIFT
800 || binoptab
->code
== ASHIFTRT
801 || binoptab
->code
== LSHIFTRT
802 || binoptab
->code
== ROTATE
803 || binoptab
->code
== ROTATERT
);
804 rtx entry_last
= get_last_insn ();
807 class = GET_MODE_CLASS (mode
);
811 /* Load duplicate non-volatile operands once. */
812 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
814 op0
= force_not_mem (op0
);
819 op0
= force_not_mem (op0
);
820 op1
= force_not_mem (op1
);
824 /* If subtracting an integer constant, convert this into an addition of
825 the negated constant. */
827 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
829 op1
= negate_rtx (mode
, op1
);
830 binoptab
= add_optab
;
833 /* If we are inside an appropriately-short loop and we are optimizing,
834 force expensive constants into a register. */
835 if (CONSTANT_P (op0
) && optimize
836 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
838 if (GET_MODE (op0
) != VOIDmode
)
839 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
840 op0
= force_reg (mode
, op0
);
843 if (CONSTANT_P (op1
) && optimize
844 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
846 if (GET_MODE (op1
) != VOIDmode
)
847 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
848 op1
= force_reg (mode
, op1
);
851 /* Record where to delete back to if we backtrack. */
852 last
= get_last_insn ();
854 /* If operation is commutative,
855 try to make the first operand a register.
856 Even better, try to make it the same as the target.
857 Also try to make the last operand a constant. */
858 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
859 || binoptab
== smul_widen_optab
860 || binoptab
== umul_widen_optab
861 || binoptab
== smul_highpart_optab
862 || binoptab
== umul_highpart_optab
)
866 if (((target
== 0 || REG_P (target
))
870 : rtx_equal_p (op1
, target
))
871 || GET_CODE (op0
) == CONST_INT
)
879 /* If we can do it with a three-operand insn, do so. */
881 if (methods
!= OPTAB_MUST_WIDEN
882 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
884 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
885 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
886 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
888 rtx xop0
= op0
, xop1
= op1
;
893 temp
= gen_reg_rtx (mode
);
895 /* If it is a commutative operator and the modes would match
896 if we would swap the operands, we can save the conversions. */
899 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
900 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
904 tmp
= op0
; op0
= op1
; op1
= tmp
;
905 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
909 /* In case the insn wants input operands in modes different from
910 those of the actual operands, convert the operands. It would
911 seem that we don't need to convert CONST_INTs, but we do, so
912 that they're properly zero-extended, sign-extended or truncated
915 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
916 xop0
= convert_modes (mode0
,
917 GET_MODE (op0
) != VOIDmode
922 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
923 xop1
= convert_modes (mode1
,
924 GET_MODE (op1
) != VOIDmode
929 /* Now, if insn's predicates don't allow our operands, put them into
932 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
933 && mode0
!= VOIDmode
)
934 xop0
= copy_to_mode_reg (mode0
, xop0
);
936 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
937 && mode1
!= VOIDmode
)
938 xop1
= copy_to_mode_reg (mode1
, xop1
);
940 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
941 temp
= gen_reg_rtx (mode
);
943 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
946 /* If PAT is composed of more than one insn, try to add an appropriate
947 REG_EQUAL note to it. If we can't because TEMP conflicts with an
948 operand, call ourselves again, this time without a target. */
949 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
950 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
952 delete_insns_since (last
);
953 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
961 delete_insns_since (last
);
964 /* If this is a multiply, see if we can do a widening operation that
965 takes operands of this mode and makes a wider mode. */
967 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
968 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
969 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
970 != CODE_FOR_nothing
))
972 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
973 unsignedp
? umul_widen_optab
: smul_widen_optab
,
974 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
978 if (GET_MODE_CLASS (mode
) == MODE_INT
)
979 return gen_lowpart (mode
, temp
);
981 return convert_to_mode (mode
, temp
, unsignedp
);
985 /* Look for a wider mode of the same class for which we think we
986 can open-code the operation. Check for a widening multiply at the
987 wider mode as well. */
989 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
990 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
991 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
992 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
994 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
995 || (binoptab
== smul_optab
996 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
997 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
998 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
999 != CODE_FOR_nothing
)))
1001 rtx xop0
= op0
, xop1
= op1
;
1004 /* For certain integer operations, we need not actually extend
1005 the narrow operands, as long as we will truncate
1006 the results to the same narrowness. */
1008 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1009 || binoptab
== xor_optab
1010 || binoptab
== add_optab
|| binoptab
== sub_optab
1011 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1012 && class == MODE_INT
)
1015 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1017 /* The second operand of a shift must always be extended. */
1018 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1019 no_extend
&& binoptab
!= ashl_optab
);
1021 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1022 unsignedp
, OPTAB_DIRECT
);
1025 if (class != MODE_INT
)
1028 target
= gen_reg_rtx (mode
);
1029 convert_move (target
, temp
, 0);
1033 return gen_lowpart (mode
, temp
);
1036 delete_insns_since (last
);
1040 /* These can be done a word at a time. */
1041 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1042 && class == MODE_INT
1043 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1044 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1050 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1051 won't be accurate, so use a new target. */
1052 if (target
== 0 || target
== op0
|| target
== op1
)
1053 target
= gen_reg_rtx (mode
);
1057 /* Do the actual arithmetic. */
1058 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1060 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1061 rtx x
= expand_binop (word_mode
, binoptab
,
1062 operand_subword_force (op0
, i
, mode
),
1063 operand_subword_force (op1
, i
, mode
),
1064 target_piece
, unsignedp
, next_methods
);
1069 if (target_piece
!= x
)
1070 emit_move_insn (target_piece
, x
);
1073 insns
= get_insns ();
1076 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1078 if (binoptab
->code
!= UNKNOWN
)
1080 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1081 copy_rtx (op0
), copy_rtx (op1
));
1085 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1090 /* Synthesize double word shifts from single word shifts. */
1091 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1092 || binoptab
== ashr_optab
)
1093 && class == MODE_INT
1094 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1095 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1096 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1097 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1098 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1100 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1101 enum machine_mode op1_mode
;
1103 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1104 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1105 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1107 /* Apply the truncation to constant shifts. */
1108 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1109 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1111 if (op1
== CONST0_RTX (op1_mode
))
1114 /* Make sure that this is a combination that expand_doubleword_shift
1115 can handle. See the comments there for details. */
1116 if (double_shift_mask
== 0
1117 || (shift_mask
== BITS_PER_WORD
- 1
1118 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1120 rtx insns
, equiv_value
;
1121 rtx into_target
, outof_target
;
1122 rtx into_input
, outof_input
;
1123 int left_shift
, outof_word
;
1125 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1126 won't be accurate, so use a new target. */
1127 if (target
== 0 || target
== op0
|| target
== op1
)
1128 target
= gen_reg_rtx (mode
);
1132 /* OUTOF_* is the word we are shifting bits away from, and
1133 INTO_* is the word that we are shifting bits towards, thus
1134 they differ depending on the direction of the shift and
1135 WORDS_BIG_ENDIAN. */
1137 left_shift
= binoptab
== ashl_optab
;
1138 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1140 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1141 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1143 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1144 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1146 if (expand_doubleword_shift (op1_mode
, binoptab
,
1147 outof_input
, into_input
, op1
,
1148 outof_target
, into_target
,
1149 unsignedp
, methods
, shift_mask
))
1151 insns
= get_insns ();
1154 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1155 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1162 /* Synthesize double word rotates from single word shifts. */
1163 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1164 && class == MODE_INT
1165 && GET_CODE (op1
) == CONST_INT
1166 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1167 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1168 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1170 rtx insns
, equiv_value
;
1171 rtx into_target
, outof_target
;
1172 rtx into_input
, outof_input
;
1174 int shift_count
, left_shift
, outof_word
;
1176 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1177 won't be accurate, so use a new target. Do this also if target is not
1178 a REG, first because having a register instead may open optimization
1179 opportunities, and second because if target and op0 happen to be MEMs
1180 designating the same location, we would risk clobbering it too early
1181 in the code sequence we generate below. */
1182 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1183 target
= gen_reg_rtx (mode
);
1187 shift_count
= INTVAL (op1
);
1189 /* OUTOF_* is the word we are shifting bits away from, and
1190 INTO_* is the word that we are shifting bits towards, thus
1191 they differ depending on the direction of the shift and
1192 WORDS_BIG_ENDIAN. */
1194 left_shift
= (binoptab
== rotl_optab
);
1195 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1197 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1198 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1200 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1201 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1203 if (shift_count
== BITS_PER_WORD
)
1205 /* This is just a word swap. */
1206 emit_move_insn (outof_target
, into_input
);
1207 emit_move_insn (into_target
, outof_input
);
1212 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1213 rtx first_shift_count
, second_shift_count
;
1214 optab reverse_unsigned_shift
, unsigned_shift
;
1216 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1217 ? lshr_optab
: ashl_optab
);
1219 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1220 ? ashl_optab
: lshr_optab
);
1222 if (shift_count
> BITS_PER_WORD
)
1224 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1225 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1229 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1230 second_shift_count
= GEN_INT (shift_count
);
1233 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1234 outof_input
, first_shift_count
,
1235 NULL_RTX
, unsignedp
, next_methods
);
1236 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1237 into_input
, second_shift_count
,
1238 NULL_RTX
, unsignedp
, next_methods
);
1240 if (into_temp1
!= 0 && into_temp2
!= 0)
1241 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1242 into_target
, unsignedp
, next_methods
);
1246 if (inter
!= 0 && inter
!= into_target
)
1247 emit_move_insn (into_target
, inter
);
1249 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1250 into_input
, first_shift_count
,
1251 NULL_RTX
, unsignedp
, next_methods
);
1252 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1253 outof_input
, second_shift_count
,
1254 NULL_RTX
, unsignedp
, next_methods
);
1256 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1257 inter
= expand_binop (word_mode
, ior_optab
,
1258 outof_temp1
, outof_temp2
,
1259 outof_target
, unsignedp
, next_methods
);
1261 if (inter
!= 0 && inter
!= outof_target
)
1262 emit_move_insn (outof_target
, inter
);
1265 insns
= get_insns ();
1270 if (binoptab
->code
!= UNKNOWN
)
1271 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1275 /* We can't make this a no conflict block if this is a word swap,
1276 because the word swap case fails if the input and output values
1277 are in the same register. */
1278 if (shift_count
!= BITS_PER_WORD
)
1279 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1288 /* These can be done a word at a time by propagating carries. */
1289 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1290 && class == MODE_INT
1291 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1292 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1295 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1296 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1297 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1298 rtx xop0
, xop1
, xtarget
;
1300 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1301 value is one of those, use it. Otherwise, use 1 since it is the
1302 one easiest to get. */
1303 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1304 int normalizep
= STORE_FLAG_VALUE
;
1309 /* Prepare the operands. */
1310 xop0
= force_reg (mode
, op0
);
1311 xop1
= force_reg (mode
, op1
);
1313 xtarget
= gen_reg_rtx (mode
);
1315 if (target
== 0 || !REG_P (target
))
1318 /* Indicate for flow that the entire target reg is being set. */
1320 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1322 /* Do the actual arithmetic. */
1323 for (i
= 0; i
< nwords
; i
++)
1325 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1326 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1327 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1328 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1331 /* Main add/subtract of the input operands. */
1332 x
= expand_binop (word_mode
, binoptab
,
1333 op0_piece
, op1_piece
,
1334 target_piece
, unsignedp
, next_methods
);
1340 /* Store carry from main add/subtract. */
1341 carry_out
= gen_reg_rtx (word_mode
);
1342 carry_out
= emit_store_flag_force (carry_out
,
1343 (binoptab
== add_optab
1346 word_mode
, 1, normalizep
);
1353 /* Add/subtract previous carry to main result. */
1354 newx
= expand_binop (word_mode
,
1355 normalizep
== 1 ? binoptab
: otheroptab
,
1357 NULL_RTX
, 1, next_methods
);
1361 /* Get out carry from adding/subtracting carry in. */
1362 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1363 carry_tmp
= emit_store_flag_force (carry_tmp
,
1364 (binoptab
== add_optab
1367 word_mode
, 1, normalizep
);
1369 /* Logical-ior the two poss. carry together. */
1370 carry_out
= expand_binop (word_mode
, ior_optab
,
1371 carry_out
, carry_tmp
,
1372 carry_out
, 0, next_methods
);
1376 emit_move_insn (target_piece
, newx
);
1379 carry_in
= carry_out
;
1382 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1384 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1385 || ! rtx_equal_p (target
, xtarget
))
1387 rtx temp
= emit_move_insn (target
, xtarget
);
1389 set_unique_reg_note (temp
,
1391 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1402 delete_insns_since (last
);
1405 /* If we want to multiply two two-word values and have normal and widening
1406 multiplies of single-word values, we can do this with three smaller
1407 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1408 because we are not operating on one word at a time.
1410 The multiplication proceeds as follows:
1411 _______________________
1412 [__op0_high_|__op0_low__]
1413 _______________________
1414 * [__op1_high_|__op1_low__]
1415 _______________________________________________
1416 _______________________
1417 (1) [__op0_low__*__op1_low__]
1418 _______________________
1419 (2a) [__op0_low__*__op1_high_]
1420 _______________________
1421 (2b) [__op0_high_*__op1_low__]
1422 _______________________
1423 (3) [__op0_high_*__op1_high_]
1426 This gives a 4-word result. Since we are only interested in the
1427 lower 2 words, partial result (3) and the upper words of (2a) and
1428 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1429 calculated using non-widening multiplication.
1431 (1), however, needs to be calculated with an unsigned widening
1432 multiplication. If this operation is not directly supported we
1433 try using a signed widening multiplication and adjust the result.
1434 This adjustment works as follows:
1436 If both operands are positive then no adjustment is needed.
1438 If the operands have different signs, for example op0_low < 0 and
1439 op1_low >= 0, the instruction treats the most significant bit of
1440 op0_low as a sign bit instead of a bit with significance
1441 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1442 with 2**BITS_PER_WORD - op0_low, and two's complements the
1443 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1446 Similarly, if both operands are negative, we need to add
1447 (op0_low + op1_low) * 2**BITS_PER_WORD.
1449 We use a trick to adjust quickly. We logically shift op0_low right
1450 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1451 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1452 logical shift exists, we do an arithmetic right shift and subtract
1455 if (binoptab
== smul_optab
1456 && class == MODE_INT
1457 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1458 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1459 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1460 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1461 != CODE_FOR_nothing
)
1462 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1463 != CODE_FOR_nothing
)))
1465 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1466 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1467 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1468 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1469 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1470 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1472 rtx op0_xhigh
= NULL_RTX
;
1473 rtx op1_xhigh
= NULL_RTX
;
1475 /* If the target is the same as one of the inputs, don't use it. This
1476 prevents problems with the REG_EQUAL note. */
1477 if (target
== op0
|| target
== op1
1478 || (target
!= 0 && !REG_P (target
)))
1481 /* Multiply the two lower words to get a double-word product.
1482 If unsigned widening multiplication is available, use that;
1483 otherwise use the signed form and compensate. */
1485 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1487 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1488 target
, 1, OPTAB_DIRECT
);
1490 /* If we didn't succeed, delete everything we did so far. */
1492 delete_insns_since (last
);
1494 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1498 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1499 != CODE_FOR_nothing
)
1501 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1502 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1503 target
, 1, OPTAB_DIRECT
);
1504 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1505 NULL_RTX
, 1, next_methods
);
1507 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1508 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1511 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1512 NULL_RTX
, 0, next_methods
);
1514 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1515 op0_xhigh
, op0_xhigh
, 0,
1519 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1520 NULL_RTX
, 1, next_methods
);
1522 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1523 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1526 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1527 NULL_RTX
, 0, next_methods
);
1529 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1530 op1_xhigh
, op1_xhigh
, 0,
1535 /* If we have been able to directly compute the product of the
1536 low-order words of the operands and perform any required adjustments
1537 of the operands, we proceed by trying two more multiplications
1538 and then computing the appropriate sum.
1540 We have checked above that the required addition is provided.
1541 Full-word addition will normally always succeed, especially if
1542 it is provided at all, so we don't worry about its failure. The
1543 multiplication may well fail, however, so we do handle that. */
1545 if (product
&& op0_xhigh
&& op1_xhigh
)
1547 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1548 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1549 NULL_RTX
, 0, OPTAB_DIRECT
);
1551 if (!REG_P (product_high
))
1552 product_high
= force_reg (word_mode
, product_high
);
1555 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1556 product_high
, 0, next_methods
);
1558 if (temp
!= 0 && temp
!= product_high
)
1559 emit_move_insn (product_high
, temp
);
1562 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1563 NULL_RTX
, 0, OPTAB_DIRECT
);
1566 temp
= expand_binop (word_mode
, add_optab
, temp
,
1567 product_high
, product_high
,
1570 if (temp
!= 0 && temp
!= product_high
)
1571 emit_move_insn (product_high
, temp
);
1573 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1577 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1579 temp
= emit_move_insn (product
, product
);
1580 set_unique_reg_note (temp
,
1582 gen_rtx_fmt_ee (MULT
, mode
,
1591 /* If we get here, we couldn't do it for some reason even though we
1592 originally thought we could. Delete anything we've emitted in
1595 delete_insns_since (last
);
1598 /* It can't be open-coded in this mode.
1599 Use a library call if one is available and caller says that's ok. */
1601 if (binoptab
->handlers
[(int) mode
].libfunc
1602 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1606 enum machine_mode op1_mode
= mode
;
1613 op1_mode
= word_mode
;
1614 /* Specify unsigned here,
1615 since negative shift counts are meaningless. */
1616 op1x
= convert_to_mode (word_mode
, op1
, 1);
1619 if (GET_MODE (op0
) != VOIDmode
1620 && GET_MODE (op0
) != mode
)
1621 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1623 /* Pass 1 for NO_QUEUE so we don't lose any increments
1624 if the libcall is cse'd or moved. */
1625 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1626 NULL_RTX
, LCT_CONST
, mode
, 2,
1627 op0
, mode
, op1x
, op1_mode
);
1629 insns
= get_insns ();
1632 target
= gen_reg_rtx (mode
);
1633 emit_libcall_block (insns
, target
, value
,
1634 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1639 delete_insns_since (last
);
1641 /* It can't be done in this mode. Can we do it in a wider mode? */
1643 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1644 || methods
== OPTAB_MUST_WIDEN
))
1646 /* Caller says, don't even try. */
1647 delete_insns_since (entry_last
);
1651 /* Compute the value of METHODS to pass to recursive calls.
1652 Don't allow widening to be tried recursively. */
1654 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1656 /* Look for a wider mode of the same class for which it appears we can do
1659 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1661 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1662 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1664 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1665 != CODE_FOR_nothing
)
1666 || (methods
== OPTAB_LIB
1667 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1669 rtx xop0
= op0
, xop1
= op1
;
1672 /* For certain integer operations, we need not actually extend
1673 the narrow operands, as long as we will truncate
1674 the results to the same narrowness. */
1676 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1677 || binoptab
== xor_optab
1678 || binoptab
== add_optab
|| binoptab
== sub_optab
1679 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1680 && class == MODE_INT
)
1683 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1684 unsignedp
, no_extend
);
1686 /* The second operand of a shift must always be extended. */
1687 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1688 no_extend
&& binoptab
!= ashl_optab
);
1690 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1691 unsignedp
, methods
);
1694 if (class != MODE_INT
)
1697 target
= gen_reg_rtx (mode
);
1698 convert_move (target
, temp
, 0);
1702 return gen_lowpart (mode
, temp
);
1705 delete_insns_since (last
);
1710 delete_insns_since (entry_last
);
1714 /* Expand a binary operator which has both signed and unsigned forms.
1715 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1718 If we widen unsigned operands, we may use a signed wider operation instead
1719 of an unsigned wider operation, since the result would be the same. */
1722 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1723 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1724 enum optab_methods methods
)
1727 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1728 struct optab wide_soptab
;
1730 /* Do it without widening, if possible. */
1731 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1732 unsignedp
, OPTAB_DIRECT
);
1733 if (temp
|| methods
== OPTAB_DIRECT
)
1736 /* Try widening to a signed int. Make a fake signed optab that
1737 hides any signed insn for direct use. */
1738 wide_soptab
= *soptab
;
1739 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1740 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1742 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1743 unsignedp
, OPTAB_WIDEN
);
1745 /* For unsigned operands, try widening to an unsigned int. */
1746 if (temp
== 0 && unsignedp
)
1747 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1748 unsignedp
, OPTAB_WIDEN
);
1749 if (temp
|| methods
== OPTAB_WIDEN
)
1752 /* Use the right width lib call if that exists. */
1753 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1754 if (temp
|| methods
== OPTAB_LIB
)
1757 /* Must widen and use a lib call, use either signed or unsigned. */
1758 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1759 unsignedp
, methods
);
1763 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1764 unsignedp
, methods
);
1768 /* Generate code to perform an operation specified by UNOPPTAB
1769 on operand OP0, with two results to TARG0 and TARG1.
1770 We assume that the order of the operands for the instruction
1771 is TARG0, TARG1, OP0.
1773 Either TARG0 or TARG1 may be zero, but what that means is that
1774 the result is not actually wanted. We will generate it into
1775 a dummy pseudo-reg and discard it. They may not both be zero.
1777 Returns 1 if this operation can be performed; 0 if not. */
1780 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1783 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1784 enum mode_class
class;
1785 enum machine_mode wider_mode
;
1786 rtx entry_last
= get_last_insn ();
1789 class = GET_MODE_CLASS (mode
);
1792 op0
= force_not_mem (op0
);
1795 targ0
= gen_reg_rtx (mode
);
1797 targ1
= gen_reg_rtx (mode
);
1799 /* Record where to go back to if we fail. */
1800 last
= get_last_insn ();
1802 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1804 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1805 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1809 if (GET_MODE (xop0
) != VOIDmode
1810 && GET_MODE (xop0
) != mode0
)
1811 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1813 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1814 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop0
, mode0
))
1815 xop0
= copy_to_mode_reg (mode0
, xop0
);
1817 /* We could handle this, but we should always be called with a pseudo
1818 for our targets and all insns should take them as outputs. */
1819 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1820 || ! (*insn_data
[icode
].operand
[1].predicate
) (targ1
, mode
))
1823 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
1830 delete_insns_since (last
);
1833 /* It can't be done in this mode. Can we do it in a wider mode? */
1835 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1837 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1838 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1840 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1841 != CODE_FOR_nothing
)
1843 rtx t0
= gen_reg_rtx (wider_mode
);
1844 rtx t1
= gen_reg_rtx (wider_mode
);
1845 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1847 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1849 convert_move (targ0
, t0
, unsignedp
);
1850 convert_move (targ1
, t1
, unsignedp
);
1854 delete_insns_since (last
);
1859 delete_insns_since (entry_last
);
1863 /* Generate code to perform an operation specified by BINOPTAB
1864 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1865 We assume that the order of the operands for the instruction
1866 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1867 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1869 Either TARG0 or TARG1 may be zero, but what that means is that
1870 the result is not actually wanted. We will generate it into
1871 a dummy pseudo-reg and discard it. They may not both be zero.
1873 Returns 1 if this operation can be performed; 0 if not. */
1876 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
1879 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1880 enum mode_class
class;
1881 enum machine_mode wider_mode
;
1882 rtx entry_last
= get_last_insn ();
1885 class = GET_MODE_CLASS (mode
);
1889 op0
= force_not_mem (op0
);
1890 op1
= force_not_mem (op1
);
1893 /* If we are inside an appropriately-short loop and we are optimizing,
1894 force expensive constants into a register. */
1895 if (CONSTANT_P (op0
) && optimize
1896 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1897 op0
= force_reg (mode
, op0
);
1899 if (CONSTANT_P (op1
) && optimize
1900 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1901 op1
= force_reg (mode
, op1
);
1904 targ0
= gen_reg_rtx (mode
);
1906 targ1
= gen_reg_rtx (mode
);
1908 /* Record where to go back to if we fail. */
1909 last
= get_last_insn ();
1911 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1913 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1914 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1915 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1917 rtx xop0
= op0
, xop1
= op1
;
1919 /* In case the insn wants input operands in modes different from
1920 those of the actual operands, convert the operands. It would
1921 seem that we don't need to convert CONST_INTs, but we do, so
1922 that they're properly zero-extended, sign-extended or truncated
1925 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1926 xop0
= convert_modes (mode0
,
1927 GET_MODE (op0
) != VOIDmode
1932 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1933 xop1
= convert_modes (mode1
,
1934 GET_MODE (op1
) != VOIDmode
1939 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1940 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
1941 xop0
= copy_to_mode_reg (mode0
, xop0
);
1943 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
1944 xop1
= copy_to_mode_reg (mode1
, xop1
);
1946 /* We could handle this, but we should always be called with a pseudo
1947 for our targets and all insns should take them as outputs. */
1948 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1949 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
1952 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
1959 delete_insns_since (last
);
1962 /* It can't be done in this mode. Can we do it in a wider mode? */
1964 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1966 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1967 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1969 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1970 != CODE_FOR_nothing
)
1972 rtx t0
= gen_reg_rtx (wider_mode
);
1973 rtx t1
= gen_reg_rtx (wider_mode
);
1974 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1975 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
1977 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
1980 convert_move (targ0
, t0
, unsignedp
);
1981 convert_move (targ1
, t1
, unsignedp
);
1985 delete_insns_since (last
);
1990 delete_insns_since (entry_last
);
1994 /* Expand the two-valued library call indicated by BINOPTAB, but
1995 preserve only one of the values. If TARG0 is non-NULL, the first
1996 value is placed into TARG0; otherwise the second value is placed
1997 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
1998 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
1999 This routine assumes that the value returned by the library call is
2000 as if the return value was of an integral mode twice as wide as the
2001 mode of OP0. Returns 1 if the call was successful. */
2004 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2005 rtx targ0
, rtx targ1
, enum rtx_code code
)
2007 enum machine_mode mode
;
2008 enum machine_mode libval_mode
;
2012 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2013 if (!((targ0
!= NULL_RTX
) ^ (targ1
!= NULL_RTX
)))
2016 mode
= GET_MODE (op0
);
2017 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2020 /* The value returned by the library function will have twice as
2021 many bits as the nominal MODE. */
2022 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2025 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2026 NULL_RTX
, LCT_CONST
,
2030 /* Get the part of VAL containing the value that we want. */
2031 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2032 targ0
? 0 : GET_MODE_SIZE (mode
));
2033 insns
= get_insns ();
2035 /* Move the into the desired location. */
2036 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2037 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2043 /* Wrapper around expand_unop which takes an rtx code to specify
2044 the operation to perform, not an optab pointer. All other
2045 arguments are the same. */
2047 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2048 rtx target
, int unsignedp
)
2050 optab unop
= code_to_optab
[(int) code
];
2054 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2060 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2062 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2064 enum mode_class
class = GET_MODE_CLASS (mode
);
2065 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2067 enum machine_mode wider_mode
;
2068 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2069 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2071 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2072 != CODE_FOR_nothing
)
2074 rtx xop0
, temp
, last
;
2076 last
= get_last_insn ();
2079 target
= gen_reg_rtx (mode
);
2080 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2081 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2083 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2084 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2085 - GET_MODE_BITSIZE (mode
)),
2086 target
, true, OPTAB_DIRECT
);
2088 delete_insns_since (last
);
2097 /* Try calculating (parity x) as (and (popcount x) 1), where
2098 popcount can also be done in a wider mode. */
2100 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2102 enum mode_class
class = GET_MODE_CLASS (mode
);
2103 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2105 enum machine_mode wider_mode
;
2106 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2107 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2109 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2110 != CODE_FOR_nothing
)
2112 rtx xop0
, temp
, last
;
2114 last
= get_last_insn ();
2117 target
= gen_reg_rtx (mode
);
2118 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2119 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2122 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2123 target
, true, OPTAB_DIRECT
);
2125 delete_insns_since (last
);
2134 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2135 conditions, VAL may already be a SUBREG against which we cannot generate
2136 a further SUBREG. In this case, we expect forcing the value into a
2137 register will work around the situation. */
2140 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2141 enum machine_mode imode
)
2144 ret
= lowpart_subreg (omode
, val
, imode
);
2147 val
= force_reg (imode
, val
);
2148 ret
= lowpart_subreg (omode
, val
, imode
);
2149 gcc_assert (ret
!= NULL
);
2154 /* Generate code to perform an operation specified by UNOPTAB
2155 on operand OP0, with result having machine-mode MODE.
2157 UNSIGNEDP is for the case where we have to widen the operands
2158 to perform the operation. It says to use zero-extension.
2160 If TARGET is nonzero, the value
2161 is generated there, if it is convenient to do so.
2162 In all cases an rtx is returned for the locus of the value;
2163 this may or may not be TARGET. */
2166 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2169 enum mode_class
class;
2170 enum machine_mode wider_mode
;
2172 rtx last
= get_last_insn ();
2175 class = GET_MODE_CLASS (mode
);
2178 op0
= force_not_mem (op0
);
2180 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2182 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2183 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2189 temp
= gen_reg_rtx (mode
);
2191 if (GET_MODE (xop0
) != VOIDmode
2192 && GET_MODE (xop0
) != mode0
)
2193 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2195 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2197 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2198 xop0
= copy_to_mode_reg (mode0
, xop0
);
2200 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2201 temp
= gen_reg_rtx (mode
);
2203 pat
= GEN_FCN (icode
) (temp
, xop0
);
2206 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2207 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2209 delete_insns_since (last
);
2210 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2218 delete_insns_since (last
);
2221 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2223 /* Widening clz needs special treatment. */
2224 if (unoptab
== clz_optab
)
2226 temp
= widen_clz (mode
, op0
, target
);
2233 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2234 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2235 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2237 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2241 /* For certain operations, we need not actually extend
2242 the narrow operand, as long as we will truncate the
2243 results to the same narrowness. */
2245 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2246 (unoptab
== neg_optab
2247 || unoptab
== one_cmpl_optab
)
2248 && class == MODE_INT
);
2250 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2255 if (class != MODE_INT
)
2258 target
= gen_reg_rtx (mode
);
2259 convert_move (target
, temp
, 0);
2263 return gen_lowpart (mode
, temp
);
2266 delete_insns_since (last
);
2270 /* These can be done a word at a time. */
2271 if (unoptab
== one_cmpl_optab
2272 && class == MODE_INT
2273 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2274 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2279 if (target
== 0 || target
== op0
)
2280 target
= gen_reg_rtx (mode
);
2284 /* Do the actual arithmetic. */
2285 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2287 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2288 rtx x
= expand_unop (word_mode
, unoptab
,
2289 operand_subword_force (op0
, i
, mode
),
2290 target_piece
, unsignedp
);
2292 if (target_piece
!= x
)
2293 emit_move_insn (target_piece
, x
);
2296 insns
= get_insns ();
2299 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2300 gen_rtx_fmt_e (unoptab
->code
, mode
,
2305 /* Try negating floating point values by flipping the sign bit. */
2306 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2307 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2309 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2310 enum machine_mode imode
= int_mode_for_mode (mode
);
2311 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2313 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2315 HOST_WIDE_INT hi
, lo
;
2316 rtx last
= get_last_insn ();
2318 /* Handle targets with different FP word orders. */
2319 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2321 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2322 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2323 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2326 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2329 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2333 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2336 temp
= expand_binop (imode
, xor_optab
,
2337 gen_lowpart (imode
, op0
),
2338 immed_double_const (lo
, hi
, imode
),
2339 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2344 target
= gen_reg_rtx (mode
);
2345 temp
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2346 insn
= emit_move_insn (target
, temp
);
2347 set_unique_reg_note (insn
, REG_EQUAL
,
2348 gen_rtx_fmt_e (NEG
, mode
,
2352 delete_insns_since (last
);
2356 /* Try calculating parity (x) as popcount (x) % 2. */
2357 if (unoptab
== parity_optab
)
2359 temp
= expand_parity (mode
, op0
, target
);
2364 /* If there is no negation pattern, try subtracting from zero. */
2365 if (unoptab
== neg_optab
&& class == MODE_INT
)
2367 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2368 target
, unsignedp
, OPTAB_DIRECT
);
2374 /* Now try a library call in this mode. */
2375 if (unoptab
->handlers
[(int) mode
].libfunc
)
2379 enum machine_mode outmode
= mode
;
2381 /* All of these functions return small values. Thus we choose to
2382 have them return something that isn't a double-word. */
2383 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2384 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2386 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2390 /* Pass 1 for NO_QUEUE so we don't lose any increments
2391 if the libcall is cse'd or moved. */
2392 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2393 NULL_RTX
, LCT_CONST
, outmode
,
2395 insns
= get_insns ();
2398 target
= gen_reg_rtx (outmode
);
2399 emit_libcall_block (insns
, target
, value
,
2400 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2405 /* It can't be done in this mode. Can we do it in a wider mode? */
2407 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2409 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2410 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2412 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2413 != CODE_FOR_nothing
)
2414 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2418 /* For certain operations, we need not actually extend
2419 the narrow operand, as long as we will truncate the
2420 results to the same narrowness. */
2422 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2423 (unoptab
== neg_optab
2424 || unoptab
== one_cmpl_optab
)
2425 && class == MODE_INT
);
2427 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2430 /* If we are generating clz using wider mode, adjust the
2432 if (unoptab
== clz_optab
&& temp
!= 0)
2433 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2434 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2435 - GET_MODE_BITSIZE (mode
)),
2436 target
, true, OPTAB_DIRECT
);
2440 if (class != MODE_INT
)
2443 target
= gen_reg_rtx (mode
);
2444 convert_move (target
, temp
, 0);
2448 return gen_lowpart (mode
, temp
);
2451 delete_insns_since (last
);
2456 /* If there is no negate operation, try doing a subtract from zero.
2457 The US Software GOFAST library needs this. FIXME: This is *wrong*
2458 for floating-point operations due to negative zeros! */
2459 if (unoptab
->code
== NEG
)
2462 temp
= expand_binop (mode
,
2463 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2464 CONST0_RTX (mode
), op0
,
2465 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2473 /* Emit code to compute the absolute value of OP0, with result to
2474 TARGET if convenient. (TARGET may be 0.) The return value says
2475 where the result actually is to be found.
2477 MODE is the mode of the operand; the mode of the result is
2478 different but can be deduced from MODE.
2483 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2484 int result_unsignedp
)
2489 result_unsignedp
= 1;
2491 /* First try to do it with a special abs instruction. */
2492 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2497 /* For floating point modes, try clearing the sign bit. */
2498 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2499 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2501 const struct real_format
*fmt
= REAL_MODE_FORMAT (mode
);
2502 enum machine_mode imode
= int_mode_for_mode (mode
);
2503 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2505 if (imode
!= BLKmode
&& bitpos
>= 0)
2507 HOST_WIDE_INT hi
, lo
;
2508 rtx last
= get_last_insn ();
2510 /* Handle targets with different FP word orders. */
2511 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2513 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2514 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2515 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2518 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2521 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2525 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2528 temp
= expand_binop (imode
, and_optab
,
2529 gen_lowpart (imode
, op0
),
2530 immed_double_const (~lo
, ~hi
, imode
),
2531 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2536 target
= gen_reg_rtx (mode
);
2537 temp
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2538 insn
= emit_move_insn (target
, temp
);
2539 set_unique_reg_note (insn
, REG_EQUAL
,
2540 gen_rtx_fmt_e (ABS
, mode
,
2544 delete_insns_since (last
);
2548 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2549 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2551 rtx last
= get_last_insn ();
2553 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2555 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2561 delete_insns_since (last
);
2564 /* If this machine has expensive jumps, we can do integer absolute
2565 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2566 where W is the width of MODE. */
2568 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2570 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2571 size_int (GET_MODE_BITSIZE (mode
) - 1),
2574 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2577 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2578 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2588 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2589 int result_unsignedp
, int safe
)
2594 result_unsignedp
= 1;
2596 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2600 /* If that does not win, use conditional jump and negate. */
2602 /* It is safe to use the target if it is the same
2603 as the source if this is also a pseudo register */
2604 if (op0
== target
&& REG_P (op0
)
2605 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2608 op1
= gen_label_rtx ();
2609 if (target
== 0 || ! safe
2610 || GET_MODE (target
) != mode
2611 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2613 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2614 target
= gen_reg_rtx (mode
);
2616 emit_move_insn (target
, op0
);
2619 /* If this mode is an integer too wide to compare properly,
2620 compare word by word. Rely on CSE to optimize constant cases. */
2621 if (GET_MODE_CLASS (mode
) == MODE_INT
2622 && ! can_compare_p (GE
, mode
, ccp_jump
))
2623 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2626 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2627 NULL_RTX
, NULL_RTX
, op1
);
2629 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2632 emit_move_insn (target
, op0
);
2638 /* Generate an instruction whose insn-code is INSN_CODE,
2639 with two operands: an output TARGET and an input OP0.
2640 TARGET *must* be nonzero, and the output is always stored there.
2641 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2642 the value that is stored into TARGET. */
2645 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2648 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2653 /* Sign and zero extension from memory is often done specially on
2654 RISC machines, so forcing into a register here can pessimize
2656 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2657 op0
= force_not_mem (op0
);
2659 /* Now, if insn does not accept our operands, put them into pseudos. */
2661 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2662 op0
= copy_to_mode_reg (mode0
, op0
);
2664 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2665 || (flag_force_mem
&& MEM_P (temp
)))
2666 temp
= gen_reg_rtx (GET_MODE (temp
));
2668 pat
= GEN_FCN (icode
) (temp
, op0
);
2670 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2671 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2676 emit_move_insn (target
, temp
);
2679 /* Emit code to perform a series of operations on a multi-word quantity, one
2682 Such a block is preceded by a CLOBBER of the output, consists of multiple
2683 insns, each setting one word of the output, and followed by a SET copying
2684 the output to itself.
2686 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2687 note indicating that it doesn't conflict with the (also multi-word)
2688 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2691 INSNS is a block of code generated to perform the operation, not including
2692 the CLOBBER and final copy. All insns that compute intermediate values
2693 are first emitted, followed by the block as described above.
2695 TARGET, OP0, and OP1 are the output and inputs of the operations,
2696 respectively. OP1 may be zero for a unary operation.
2698 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2701 If TARGET is not a register, INSNS is simply emitted with no special
2702 processing. Likewise if anything in INSNS is not an INSN or if
2703 there is a libcall block inside INSNS.
2705 The final insn emitted is returned. */
2708 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2710 rtx prev
, next
, first
, last
, insn
;
2712 if (!REG_P (target
) || reload_in_progress
)
2713 return emit_insn (insns
);
2715 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2716 if (!NONJUMP_INSN_P (insn
)
2717 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2718 return emit_insn (insns
);
2720 /* First emit all insns that do not store into words of the output and remove
2721 these from the list. */
2722 for (insn
= insns
; insn
; insn
= next
)
2727 next
= NEXT_INSN (insn
);
2729 /* Some ports (cris) create a libcall regions at their own. We must
2730 avoid any potential nesting of LIBCALLs. */
2731 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2732 remove_note (insn
, note
);
2733 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2734 remove_note (insn
, note
);
2736 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2737 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2738 set
= PATTERN (insn
);
2739 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2741 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2742 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2744 set
= XVECEXP (PATTERN (insn
), 0, i
);
2752 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2754 if (PREV_INSN (insn
))
2755 NEXT_INSN (PREV_INSN (insn
)) = next
;
2760 PREV_INSN (next
) = PREV_INSN (insn
);
2766 prev
= get_last_insn ();
2768 /* Now write the CLOBBER of the output, followed by the setting of each
2769 of the words, followed by the final copy. */
2770 if (target
!= op0
&& target
!= op1
)
2771 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2773 for (insn
= insns
; insn
; insn
= next
)
2775 next
= NEXT_INSN (insn
);
2778 if (op1
&& REG_P (op1
))
2779 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
2782 if (op0
&& REG_P (op0
))
2783 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
2787 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2788 != CODE_FOR_nothing
)
2790 last
= emit_move_insn (target
, target
);
2792 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
2796 last
= get_last_insn ();
2798 /* Remove any existing REG_EQUAL note from "last", or else it will
2799 be mistaken for a note referring to the full contents of the
2800 alleged libcall value when found together with the REG_RETVAL
2801 note added below. An existing note can come from an insn
2802 expansion at "last". */
2803 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2807 first
= get_insns ();
2809 first
= NEXT_INSN (prev
);
2811 /* Encapsulate the block so it gets manipulated as a unit. */
2812 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2814 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
2819 /* Emit code to make a call to a constant function or a library call.
2821 INSNS is a list containing all insns emitted in the call.
2822 These insns leave the result in RESULT. Our block is to copy RESULT
2823 to TARGET, which is logically equivalent to EQUIV.
2825 We first emit any insns that set a pseudo on the assumption that these are
2826 loading constants into registers; doing so allows them to be safely cse'ed
2827 between blocks. Then we emit all the other insns in the block, followed by
2828 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2829 note with an operand of EQUIV.
2831 Moving assignments to pseudos outside of the block is done to improve
2832 the generated code, but is not required to generate correct code,
2833 hence being unable to move an assignment is not grounds for not making
2834 a libcall block. There are two reasons why it is safe to leave these
2835 insns inside the block: First, we know that these pseudos cannot be
2836 used in generated RTL outside the block since they are created for
2837 temporary purposes within the block. Second, CSE will not record the
2838 values of anything set inside a libcall block, so we know they must
2839 be dead at the end of the block.
2841 Except for the first group of insns (the ones setting pseudos), the
2842 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2845 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
2847 rtx final_dest
= target
;
2848 rtx prev
, next
, first
, last
, insn
;
2850 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
2851 into a MEM later. Protect the libcall block from this change. */
2852 if (! REG_P (target
) || REG_USERVAR_P (target
))
2853 target
= gen_reg_rtx (GET_MODE (target
));
2855 /* If we're using non-call exceptions, a libcall corresponding to an
2856 operation that may trap may also trap. */
2857 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
2859 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2862 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2864 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
2865 remove_note (insn
, note
);
2869 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
2870 reg note to indicate that this call cannot throw or execute a nonlocal
2871 goto (unless there is already a REG_EH_REGION note, in which case
2873 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2876 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2879 XEXP (note
, 0) = constm1_rtx
;
2881 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
2885 /* First emit all insns that set pseudos. Remove them from the list as
2886 we go. Avoid insns that set pseudos which were referenced in previous
2887 insns. These can be generated by move_by_pieces, for example,
2888 to update an address. Similarly, avoid insns that reference things
2889 set in previous insns. */
2891 for (insn
= insns
; insn
; insn
= next
)
2893 rtx set
= single_set (insn
);
2896 /* Some ports (cris) create a libcall regions at their own. We must
2897 avoid any potential nesting of LIBCALLs. */
2898 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2899 remove_note (insn
, note
);
2900 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2901 remove_note (insn
, note
);
2903 next
= NEXT_INSN (insn
);
2905 if (set
!= 0 && REG_P (SET_DEST (set
))
2906 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2908 || ((! INSN_P(insns
)
2909 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
2910 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2911 && ! modified_in_p (SET_SRC (set
), insns
)
2912 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2914 if (PREV_INSN (insn
))
2915 NEXT_INSN (PREV_INSN (insn
)) = next
;
2920 PREV_INSN (next
) = PREV_INSN (insn
);
2925 /* Some ports use a loop to copy large arguments onto the stack.
2926 Don't move anything outside such a loop. */
2931 prev
= get_last_insn ();
2933 /* Write the remaining insns followed by the final copy. */
2935 for (insn
= insns
; insn
; insn
= next
)
2937 next
= NEXT_INSN (insn
);
2942 last
= emit_move_insn (target
, result
);
2943 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2944 != CODE_FOR_nothing
)
2945 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
2948 /* Remove any existing REG_EQUAL note from "last", or else it will
2949 be mistaken for a note referring to the full contents of the
2950 libcall value when found together with the REG_RETVAL note added
2951 below. An existing note can come from an insn expansion at
2953 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2956 if (final_dest
!= target
)
2957 emit_move_insn (final_dest
, target
);
2960 first
= get_insns ();
2962 first
= NEXT_INSN (prev
);
2964 /* Encapsulate the block so it gets manipulated as a unit. */
2965 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
2967 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
2968 when the encapsulated region would not be in one basic block,
2969 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
2971 bool attach_libcall_retval_notes
= true;
2972 next
= NEXT_INSN (last
);
2973 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
2974 if (control_flow_insn_p (insn
))
2976 attach_libcall_retval_notes
= false;
2980 if (attach_libcall_retval_notes
)
2982 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2984 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
2990 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
2991 PURPOSE describes how this comparison will be used. CODE is the rtx
2992 comparison code we will be using.
2994 ??? Actually, CODE is slightly weaker than that. A target is still
2995 required to implement all of the normal bcc operations, but not
2996 required to implement all (or any) of the unordered bcc operations. */
2999 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3000 enum can_compare_purpose purpose
)
3004 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3006 if (purpose
== ccp_jump
)
3007 return bcc_gen_fctn
[(int) code
] != NULL
;
3008 else if (purpose
== ccp_store_flag
)
3009 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3011 /* There's only one cmov entry point, and it's allowed to fail. */
3014 if (purpose
== ccp_jump
3015 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3017 if (purpose
== ccp_cmov
3018 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3020 if (purpose
== ccp_store_flag
3021 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3023 mode
= GET_MODE_WIDER_MODE (mode
);
3025 while (mode
!= VOIDmode
);
3030 /* This function is called when we are going to emit a compare instruction that
3031 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3033 *PMODE is the mode of the inputs (in case they are const_int).
3034 *PUNSIGNEDP nonzero says that the operands are unsigned;
3035 this matters if they need to be widened.
3037 If they have mode BLKmode, then SIZE specifies the size of both operands.
3039 This function performs all the setup necessary so that the caller only has
3040 to emit a single comparison insn. This setup can involve doing a BLKmode
3041 comparison or emitting a library call to perform the comparison if no insn
3042 is available to handle it.
3043 The values which are passed in through pointers can be modified; the caller
3044 should perform the comparison on the modified values. */
3047 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3048 enum machine_mode
*pmode
, int *punsignedp
,
3049 enum can_compare_purpose purpose
)
3051 enum machine_mode mode
= *pmode
;
3052 rtx x
= *px
, y
= *py
;
3053 int unsignedp
= *punsignedp
;
3054 enum mode_class
class;
3056 class = GET_MODE_CLASS (mode
);
3058 /* They could both be VOIDmode if both args are immediate constants,
3059 but we should fold that at an earlier stage.
3060 With no special code here, this will call abort,
3061 reminding the programmer to implement such folding. */
3063 if (mode
!= BLKmode
&& flag_force_mem
)
3065 /* Load duplicate non-volatile operands once. */
3066 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3068 x
= force_not_mem (x
);
3073 x
= force_not_mem (x
);
3074 y
= force_not_mem (y
);
3078 /* If we are inside an appropriately-short loop and we are optimizing,
3079 force expensive constants into a register. */
3080 if (CONSTANT_P (x
) && optimize
3081 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3082 x
= force_reg (mode
, x
);
3084 if (CONSTANT_P (y
) && optimize
3085 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3086 y
= force_reg (mode
, y
);
3089 /* Abort if we have a non-canonical comparison. The RTL documentation
3090 states that canonical comparisons are required only for targets which
3092 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3096 /* Don't let both operands fail to indicate the mode. */
3097 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3098 x
= force_reg (mode
, x
);
3100 /* Handle all BLKmode compares. */
3102 if (mode
== BLKmode
)
3104 enum machine_mode cmp_mode
, result_mode
;
3105 enum insn_code cmp_code
;
3110 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3115 /* Try to use a memory block compare insn - either cmpstr
3116 or cmpmem will do. */
3117 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3118 cmp_mode
!= VOIDmode
;
3119 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3121 cmp_code
= cmpmem_optab
[cmp_mode
];
3122 if (cmp_code
== CODE_FOR_nothing
)
3123 cmp_code
= cmpstr_optab
[cmp_mode
];
3124 if (cmp_code
== CODE_FOR_nothing
)
3127 /* Must make sure the size fits the insn's mode. */
3128 if ((GET_CODE (size
) == CONST_INT
3129 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3130 || (GET_MODE_BITSIZE (GET_MODE (size
))
3131 > GET_MODE_BITSIZE (cmp_mode
)))
3134 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3135 result
= gen_reg_rtx (result_mode
);
3136 size
= convert_to_mode (cmp_mode
, size
, 1);
3137 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3141 *pmode
= result_mode
;
3145 /* Otherwise call a library function, memcmp. */
3146 libfunc
= memcmp_libfunc
;
3147 length_type
= sizetype
;
3148 result_mode
= TYPE_MODE (integer_type_node
);
3149 cmp_mode
= TYPE_MODE (length_type
);
3150 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3151 TYPE_UNSIGNED (length_type
));
3153 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3160 *pmode
= result_mode
;
3164 /* Don't allow operands to the compare to trap, as that can put the
3165 compare and branch in different basic blocks. */
3166 if (flag_non_call_exceptions
)
3169 x
= force_reg (mode
, x
);
3171 y
= force_reg (mode
, y
);
3176 if (can_compare_p (*pcomparison
, mode
, purpose
))
3179 /* Handle a lib call just for the mode we are using. */
3181 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3183 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3186 /* If we want unsigned, and this mode has a distinct unsigned
3187 comparison routine, use that. */
3188 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3189 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3191 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3192 word_mode
, 2, x
, mode
, y
, mode
);
3196 if (TARGET_LIB_INT_CMP_BIASED
)
3197 /* Integer comparison returns a result that must be compared
3198 against 1, so that even if we do an unsigned compare
3199 afterward, there is still a value that can represent the
3200 result "less than". */
3210 if (class == MODE_FLOAT
)
3211 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3217 /* Before emitting an insn with code ICODE, make sure that X, which is going
3218 to be used for operand OPNUM of the insn, is converted from mode MODE to
3219 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3220 that it is accepted by the operand predicate. Return the new value. */
3223 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3224 enum machine_mode wider_mode
, int unsignedp
)
3226 if (mode
!= wider_mode
)
3227 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3229 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3230 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3234 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3240 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3241 we can do the comparison.
3242 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3243 be NULL_RTX which indicates that only a comparison is to be generated. */
3246 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3247 enum rtx_code comparison
, int unsignedp
, rtx label
)
3249 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3250 enum mode_class
class = GET_MODE_CLASS (mode
);
3251 enum machine_mode wider_mode
= mode
;
3253 /* Try combined insns first. */
3256 enum insn_code icode
;
3257 PUT_MODE (test
, wider_mode
);
3261 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3263 if (icode
!= CODE_FOR_nothing
3264 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3266 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3267 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3268 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3273 /* Handle some compares against zero. */
3274 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3275 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3277 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3278 emit_insn (GEN_FCN (icode
) (x
));
3280 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3284 /* Handle compares for which there is a directly suitable insn. */
3286 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3287 if (icode
!= CODE_FOR_nothing
)
3289 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3290 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3291 emit_insn (GEN_FCN (icode
) (x
, y
));
3293 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3297 if (class != MODE_INT
&& class != MODE_FLOAT
3298 && class != MODE_COMPLEX_FLOAT
)
3301 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3303 while (wider_mode
!= VOIDmode
);
3308 /* Generate code to compare X with Y so that the condition codes are
3309 set and to jump to LABEL if the condition is true. If X is a
3310 constant and Y is not a constant, then the comparison is swapped to
3311 ensure that the comparison RTL has the canonical form.
3313 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3314 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3315 the proper branch condition code.
3317 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3319 MODE is the mode of the inputs (in case they are const_int).
3321 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3322 be passed unchanged to emit_cmp_insn, then potentially converted into an
3323 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3326 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3327 enum machine_mode mode
, int unsignedp
, rtx label
)
3329 rtx op0
= x
, op1
= y
;
3331 /* Swap operands and condition to ensure canonical RTL. */
3332 if (swap_commutative_operands_p (x
, y
))
3334 /* If we're not emitting a branch, this means some caller
3340 comparison
= swap_condition (comparison
);
3344 /* If OP0 is still a constant, then both X and Y must be constants. Force
3345 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3347 if (CONSTANT_P (op0
))
3348 op0
= force_reg (mode
, op0
);
3352 comparison
= unsigned_condition (comparison
);
3354 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3356 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3359 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3362 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3363 enum machine_mode mode
, int unsignedp
)
3365 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3368 /* Emit a library call comparison between floating point X and Y.
3369 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3372 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3373 enum machine_mode
*pmode
, int *punsignedp
)
3375 enum rtx_code comparison
= *pcomparison
;
3376 enum rtx_code swapped
= swap_condition (comparison
);
3377 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3380 enum machine_mode orig_mode
= GET_MODE (x
);
3381 enum machine_mode mode
;
3382 rtx value
, target
, insns
, equiv
;
3384 bool reversed_p
= false;
3386 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3388 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3391 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3394 tmp
= x
; x
= y
; y
= tmp
;
3395 comparison
= swapped
;
3399 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3400 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3402 comparison
= reversed
;
3408 if (mode
== VOIDmode
)
3411 if (mode
!= orig_mode
)
3413 x
= convert_to_mode (mode
, x
, 0);
3414 y
= convert_to_mode (mode
, y
, 0);
3417 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3418 the RTL. The allows the RTL optimizers to delete the libcall if the
3419 condition can be determined at compile-time. */
3420 if (comparison
== UNORDERED
)
3422 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3423 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3424 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3425 temp
, const_true_rtx
, equiv
);
3429 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3430 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3432 rtx true_rtx
, false_rtx
;
3437 true_rtx
= const0_rtx
;
3438 false_rtx
= const_true_rtx
;
3442 true_rtx
= const_true_rtx
;
3443 false_rtx
= const0_rtx
;
3447 true_rtx
= const1_rtx
;
3448 false_rtx
= const0_rtx
;
3452 true_rtx
= const0_rtx
;
3453 false_rtx
= constm1_rtx
;
3457 true_rtx
= constm1_rtx
;
3458 false_rtx
= const0_rtx
;
3462 true_rtx
= const0_rtx
;
3463 false_rtx
= const1_rtx
;
3469 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3470 equiv
, true_rtx
, false_rtx
);
3475 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3476 word_mode
, 2, x
, mode
, y
, mode
);
3477 insns
= get_insns ();
3480 target
= gen_reg_rtx (word_mode
);
3481 emit_libcall_block (insns
, target
, value
, equiv
);
3483 if (comparison
== UNORDERED
3484 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3485 comparison
= reversed_p
? EQ
: NE
;
3490 *pcomparison
= comparison
;
3494 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3497 emit_indirect_jump (rtx loc
)
3499 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3501 loc
= copy_to_mode_reg (Pmode
, loc
);
3503 emit_jump_insn (gen_indirect_jump (loc
));
3507 #ifdef HAVE_conditional_move
3509 /* Emit a conditional move instruction if the machine supports one for that
3510 condition and machine mode.
3512 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3513 the mode to use should they be constants. If it is VOIDmode, they cannot
3516 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3517 should be stored there. MODE is the mode to use should they be constants.
3518 If it is VOIDmode, they cannot both be constants.
3520 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3521 is not supported. */
3524 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3525 enum machine_mode cmode
, rtx op2
, rtx op3
,
3526 enum machine_mode mode
, int unsignedp
)
3528 rtx tem
, subtarget
, comparison
, insn
;
3529 enum insn_code icode
;
3530 enum rtx_code reversed
;
3532 /* If one operand is constant, make it the second one. Only do this
3533 if the other operand is not constant as well. */
3535 if (swap_commutative_operands_p (op0
, op1
))
3540 code
= swap_condition (code
);
3543 /* get_condition will prefer to generate LT and GT even if the old
3544 comparison was against zero, so undo that canonicalization here since
3545 comparisons against zero are cheaper. */
3546 if (code
== LT
&& op1
== const1_rtx
)
3547 code
= LE
, op1
= const0_rtx
;
3548 else if (code
== GT
&& op1
== constm1_rtx
)
3549 code
= GE
, op1
= const0_rtx
;
3551 if (cmode
== VOIDmode
)
3552 cmode
= GET_MODE (op0
);
3554 if (swap_commutative_operands_p (op2
, op3
)
3555 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3564 if (mode
== VOIDmode
)
3565 mode
= GET_MODE (op2
);
3567 icode
= movcc_gen_code
[mode
];
3569 if (icode
== CODE_FOR_nothing
)
3574 op2
= force_not_mem (op2
);
3575 op3
= force_not_mem (op3
);
3579 target
= gen_reg_rtx (mode
);
3583 /* If the insn doesn't accept these operands, put them in pseudos. */
3585 if (! (*insn_data
[icode
].operand
[0].predicate
)
3586 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3587 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3589 if (! (*insn_data
[icode
].operand
[2].predicate
)
3590 (op2
, insn_data
[icode
].operand
[2].mode
))
3591 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3593 if (! (*insn_data
[icode
].operand
[3].predicate
)
3594 (op3
, insn_data
[icode
].operand
[3].mode
))
3595 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3597 /* Everything should now be in the suitable form, so emit the compare insn
3598 and then the conditional move. */
3601 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3603 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3604 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3605 return NULL and let the caller figure out how best to deal with this
3607 if (GET_CODE (comparison
) != code
)
3610 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3612 /* If that failed, then give up. */
3618 if (subtarget
!= target
)
3619 convert_move (target
, subtarget
, 0);
3624 /* Return nonzero if a conditional move of mode MODE is supported.
3626 This function is for combine so it can tell whether an insn that looks
3627 like a conditional move is actually supported by the hardware. If we
3628 guess wrong we lose a bit on optimization, but that's it. */
3629 /* ??? sparc64 supports conditionally moving integers values based on fp
3630 comparisons, and vice versa. How do we handle them? */
3633 can_conditionally_move_p (enum machine_mode mode
)
3635 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3641 #endif /* HAVE_conditional_move */
3643 /* Emit a conditional addition instruction if the machine supports one for that
3644 condition and machine mode.
3646 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3647 the mode to use should they be constants. If it is VOIDmode, they cannot
3650 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3651 should be stored there. MODE is the mode to use should they be constants.
3652 If it is VOIDmode, they cannot both be constants.
3654 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3655 is not supported. */
3658 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3659 enum machine_mode cmode
, rtx op2
, rtx op3
,
3660 enum machine_mode mode
, int unsignedp
)
3662 rtx tem
, subtarget
, comparison
, insn
;
3663 enum insn_code icode
;
3664 enum rtx_code reversed
;
3666 /* If one operand is constant, make it the second one. Only do this
3667 if the other operand is not constant as well. */
3669 if (swap_commutative_operands_p (op0
, op1
))
3674 code
= swap_condition (code
);
3677 /* get_condition will prefer to generate LT and GT even if the old
3678 comparison was against zero, so undo that canonicalization here since
3679 comparisons against zero are cheaper. */
3680 if (code
== LT
&& op1
== const1_rtx
)
3681 code
= LE
, op1
= const0_rtx
;
3682 else if (code
== GT
&& op1
== constm1_rtx
)
3683 code
= GE
, op1
= const0_rtx
;
3685 if (cmode
== VOIDmode
)
3686 cmode
= GET_MODE (op0
);
3688 if (swap_commutative_operands_p (op2
, op3
)
3689 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3698 if (mode
== VOIDmode
)
3699 mode
= GET_MODE (op2
);
3701 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3703 if (icode
== CODE_FOR_nothing
)
3708 op2
= force_not_mem (op2
);
3709 op3
= force_not_mem (op3
);
3713 target
= gen_reg_rtx (mode
);
3715 /* If the insn doesn't accept these operands, put them in pseudos. */
3717 if (! (*insn_data
[icode
].operand
[0].predicate
)
3718 (target
, insn_data
[icode
].operand
[0].mode
))
3719 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3723 if (! (*insn_data
[icode
].operand
[2].predicate
)
3724 (op2
, insn_data
[icode
].operand
[2].mode
))
3725 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3727 if (! (*insn_data
[icode
].operand
[3].predicate
)
3728 (op3
, insn_data
[icode
].operand
[3].mode
))
3729 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3731 /* Everything should now be in the suitable form, so emit the compare insn
3732 and then the conditional move. */
3735 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3737 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3738 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3739 return NULL and let the caller figure out how best to deal with this
3741 if (GET_CODE (comparison
) != code
)
3744 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3746 /* If that failed, then give up. */
3752 if (subtarget
!= target
)
3753 convert_move (target
, subtarget
, 0);
3758 /* These functions attempt to generate an insn body, rather than
3759 emitting the insn, but if the gen function already emits them, we
3760 make no attempt to turn them back into naked patterns. */
3762 /* Generate and return an insn body to add Y to X. */
3765 gen_add2_insn (rtx x
, rtx y
)
3767 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3769 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3770 (x
, insn_data
[icode
].operand
[0].mode
))
3771 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3772 (x
, insn_data
[icode
].operand
[1].mode
))
3773 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3774 (y
, insn_data
[icode
].operand
[2].mode
)))
3777 return (GEN_FCN (icode
) (x
, x
, y
));
3780 /* Generate and return an insn body to add r1 and c,
3781 storing the result in r0. */
3783 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
3785 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3787 if (icode
== CODE_FOR_nothing
3788 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3789 (r0
, insn_data
[icode
].operand
[0].mode
))
3790 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3791 (r1
, insn_data
[icode
].operand
[1].mode
))
3792 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3793 (c
, insn_data
[icode
].operand
[2].mode
)))
3796 return (GEN_FCN (icode
) (r0
, r1
, c
));
3800 have_add2_insn (rtx x
, rtx y
)
3804 if (GET_MODE (x
) == VOIDmode
)
3807 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3809 if (icode
== CODE_FOR_nothing
)
3812 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3813 (x
, insn_data
[icode
].operand
[0].mode
))
3814 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3815 (x
, insn_data
[icode
].operand
[1].mode
))
3816 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3817 (y
, insn_data
[icode
].operand
[2].mode
)))
3823 /* Generate and return an insn body to subtract Y from X. */
3826 gen_sub2_insn (rtx x
, rtx y
)
3828 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3830 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3831 (x
, insn_data
[icode
].operand
[0].mode
))
3832 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3833 (x
, insn_data
[icode
].operand
[1].mode
))
3834 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3835 (y
, insn_data
[icode
].operand
[2].mode
)))
3838 return (GEN_FCN (icode
) (x
, x
, y
));
3841 /* Generate and return an insn body to subtract r1 and c,
3842 storing the result in r0. */
3844 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
3846 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3848 if (icode
== CODE_FOR_nothing
3849 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3850 (r0
, insn_data
[icode
].operand
[0].mode
))
3851 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3852 (r1
, insn_data
[icode
].operand
[1].mode
))
3853 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3854 (c
, insn_data
[icode
].operand
[2].mode
)))
3857 return (GEN_FCN (icode
) (r0
, r1
, c
));
3861 have_sub2_insn (rtx x
, rtx y
)
3865 if (GET_MODE (x
) == VOIDmode
)
3868 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3870 if (icode
== CODE_FOR_nothing
)
3873 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3874 (x
, insn_data
[icode
].operand
[0].mode
))
3875 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3876 (x
, insn_data
[icode
].operand
[1].mode
))
3877 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3878 (y
, insn_data
[icode
].operand
[2].mode
)))
3884 /* Generate the body of an instruction to copy Y into X.
3885 It may be a list of insns, if one insn isn't enough. */
3888 gen_move_insn (rtx x
, rtx y
)
3893 emit_move_insn_1 (x
, y
);
3899 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3900 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3901 no such operation exists, CODE_FOR_nothing will be returned. */
3904 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
3908 #ifdef HAVE_ptr_extend
3910 return CODE_FOR_ptr_extend
;
3913 tab
= unsignedp
? zext_optab
: sext_optab
;
3914 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
3917 /* Generate the body of an insn to extend Y (with mode MFROM)
3918 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
3921 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
3922 enum machine_mode mfrom
, int unsignedp
)
3924 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
3925 return GEN_FCN (icode
) (x
, y
);
3928 /* can_fix_p and can_float_p say whether the target machine
3929 can directly convert a given fixed point type to
3930 a given floating point type, or vice versa.
3931 The returned value is the CODE_FOR_... value to use,
3932 or CODE_FOR_nothing if these modes cannot be directly converted.
3934 *TRUNCP_PTR is set to 1 if it is necessary to output
3935 an explicit FTRUNC insn before the fix insn; otherwise 0. */
3937 static enum insn_code
3938 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
3939 int unsignedp
, int *truncp_ptr
)
3942 enum insn_code icode
;
3944 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
3945 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
3946 if (icode
!= CODE_FOR_nothing
)
3952 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
3953 for this to work. We need to rework the fix* and ftrunc* patterns
3954 and documentation. */
3955 tab
= unsignedp
? ufix_optab
: sfix_optab
;
3956 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
3957 if (icode
!= CODE_FOR_nothing
3958 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
3965 return CODE_FOR_nothing
;
3968 static enum insn_code
3969 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
3974 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
3975 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
3978 /* Generate code to convert FROM to floating point
3979 and store in TO. FROM must be fixed point and not VOIDmode.
3980 UNSIGNEDP nonzero means regard FROM as unsigned.
3981 Normally this is done by correcting the final value
3982 if it is negative. */
3985 expand_float (rtx to
, rtx from
, int unsignedp
)
3987 enum insn_code icode
;
3989 enum machine_mode fmode
, imode
;
3991 /* Crash now, because we won't be able to decide which mode to use. */
3992 if (GET_MODE (from
) == VOIDmode
)
3995 /* Look for an insn to do the conversion. Do it in the specified
3996 modes if possible; otherwise convert either input, output or both to
3997 wider mode. If the integer mode is wider than the mode of FROM,
3998 we can do the conversion signed even if the input is unsigned. */
4000 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4001 fmode
= GET_MODE_WIDER_MODE (fmode
))
4002 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4003 imode
= GET_MODE_WIDER_MODE (imode
))
4005 int doing_unsigned
= unsignedp
;
4007 if (fmode
!= GET_MODE (to
)
4008 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4011 icode
= can_float_p (fmode
, imode
, unsignedp
);
4012 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4013 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4015 if (icode
!= CODE_FOR_nothing
)
4017 if (imode
!= GET_MODE (from
))
4018 from
= convert_to_mode (imode
, from
, unsignedp
);
4020 if (fmode
!= GET_MODE (to
))
4021 target
= gen_reg_rtx (fmode
);
4023 emit_unop_insn (icode
, target
, from
,
4024 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4027 convert_move (to
, target
, 0);
4032 /* Unsigned integer, and no way to convert directly.
4033 Convert as signed, then conditionally adjust the result. */
4036 rtx label
= gen_label_rtx ();
4038 REAL_VALUE_TYPE offset
;
4041 from
= force_not_mem (from
);
4043 /* Look for a usable floating mode FMODE wider than the source and at
4044 least as wide as the target. Using FMODE will avoid rounding woes
4045 with unsigned values greater than the signed maximum value. */
4047 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4048 fmode
= GET_MODE_WIDER_MODE (fmode
))
4049 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4050 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4053 if (fmode
== VOIDmode
)
4055 /* There is no such mode. Pretend the target is wide enough. */
4056 fmode
= GET_MODE (to
);
4058 /* Avoid double-rounding when TO is narrower than FROM. */
4059 if ((significand_size (fmode
) + 1)
4060 < GET_MODE_BITSIZE (GET_MODE (from
)))
4063 rtx neglabel
= gen_label_rtx ();
4065 /* Don't use TARGET if it isn't a register, is a hard register,
4066 or is the wrong mode. */
4068 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4069 || GET_MODE (target
) != fmode
)
4070 target
= gen_reg_rtx (fmode
);
4072 imode
= GET_MODE (from
);
4073 do_pending_stack_adjust ();
4075 /* Test whether the sign bit is set. */
4076 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4079 /* The sign bit is not set. Convert as signed. */
4080 expand_float (target
, from
, 0);
4081 emit_jump_insn (gen_jump (label
));
4084 /* The sign bit is set.
4085 Convert to a usable (positive signed) value by shifting right
4086 one bit, while remembering if a nonzero bit was shifted
4087 out; i.e., compute (from & 1) | (from >> 1). */
4089 emit_label (neglabel
);
4090 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4091 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4092 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4094 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4096 expand_float (target
, temp
, 0);
4098 /* Multiply by 2 to undo the shift above. */
4099 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4100 target
, 0, OPTAB_LIB_WIDEN
);
4102 emit_move_insn (target
, temp
);
4104 do_pending_stack_adjust ();
4110 /* If we are about to do some arithmetic to correct for an
4111 unsigned operand, do it in a pseudo-register. */
4113 if (GET_MODE (to
) != fmode
4114 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4115 target
= gen_reg_rtx (fmode
);
4117 /* Convert as signed integer to floating. */
4118 expand_float (target
, from
, 0);
4120 /* If FROM is negative (and therefore TO is negative),
4121 correct its value by 2**bitwidth. */
4123 do_pending_stack_adjust ();
4124 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4128 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4129 temp
= expand_binop (fmode
, add_optab
, target
,
4130 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4131 target
, 0, OPTAB_LIB_WIDEN
);
4133 emit_move_insn (target
, temp
);
4135 do_pending_stack_adjust ();
4140 /* No hardware instruction available; call a library routine. */
4145 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4147 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4148 from
= convert_to_mode (SImode
, from
, unsignedp
);
4151 from
= force_not_mem (from
);
4153 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4159 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4160 GET_MODE (to
), 1, from
,
4162 insns
= get_insns ();
4165 emit_libcall_block (insns
, target
, value
,
4166 gen_rtx_FLOAT (GET_MODE (to
), from
));
4171 /* Copy result to requested destination
4172 if we have been computing in a temp location. */
4176 if (GET_MODE (target
) == GET_MODE (to
))
4177 emit_move_insn (to
, target
);
4179 convert_move (to
, target
, 0);
4183 /* Generate code to convert FROM to fixed point and store in TO. FROM
4184 must be floating point. */
4187 expand_fix (rtx to
, rtx from
, int unsignedp
)
4189 enum insn_code icode
;
4191 enum machine_mode fmode
, imode
;
4194 /* We first try to find a pair of modes, one real and one integer, at
4195 least as wide as FROM and TO, respectively, in which we can open-code
4196 this conversion. If the integer mode is wider than the mode of TO,
4197 we can do the conversion either signed or unsigned. */
4199 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4200 fmode
= GET_MODE_WIDER_MODE (fmode
))
4201 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4202 imode
= GET_MODE_WIDER_MODE (imode
))
4204 int doing_unsigned
= unsignedp
;
4206 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4207 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4208 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4210 if (icode
!= CODE_FOR_nothing
)
4212 if (fmode
!= GET_MODE (from
))
4213 from
= convert_to_mode (fmode
, from
, 0);
4217 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4218 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4222 if (imode
!= GET_MODE (to
))
4223 target
= gen_reg_rtx (imode
);
4225 emit_unop_insn (icode
, target
, from
,
4226 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4228 convert_move (to
, target
, unsignedp
);
4233 /* For an unsigned conversion, there is one more way to do it.
4234 If we have a signed conversion, we generate code that compares
4235 the real value to the largest representable positive number. If if
4236 is smaller, the conversion is done normally. Otherwise, subtract
4237 one plus the highest signed number, convert, and add it back.
4239 We only need to check all real modes, since we know we didn't find
4240 anything with a wider integer mode.
4242 This code used to extend FP value into mode wider than the destination.
4243 This is not needed. Consider, for instance conversion from SFmode
4246 The hot path trought the code is dealing with inputs smaller than 2^63
4247 and doing just the conversion, so there is no bits to lose.
4249 In the other path we know the value is positive in the range 2^63..2^64-1
4250 inclusive. (as for other imput overflow happens and result is undefined)
4251 So we know that the most important bit set in mantissa corresponds to
4252 2^63. The subtraction of 2^63 should not generate any rounding as it
4253 simply clears out that bit. The rest is trivial. */
4255 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4256 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4257 fmode
= GET_MODE_WIDER_MODE (fmode
))
4258 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4262 REAL_VALUE_TYPE offset
;
4263 rtx limit
, lab1
, lab2
, insn
;
4265 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4266 real_2expN (&offset
, bitsize
- 1);
4267 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4268 lab1
= gen_label_rtx ();
4269 lab2
= gen_label_rtx ();
4272 from
= force_not_mem (from
);
4274 if (fmode
!= GET_MODE (from
))
4275 from
= convert_to_mode (fmode
, from
, 0);
4277 /* See if we need to do the subtraction. */
4278 do_pending_stack_adjust ();
4279 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4282 /* If not, do the signed "fix" and branch around fixup code. */
4283 expand_fix (to
, from
, 0);
4284 emit_jump_insn (gen_jump (lab2
));
4287 /* Otherwise, subtract 2**(N-1), convert to signed number,
4288 then add 2**(N-1). Do the addition using XOR since this
4289 will often generate better code. */
4291 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4292 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4293 expand_fix (to
, target
, 0);
4294 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4296 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4298 to
, 1, OPTAB_LIB_WIDEN
);
4301 emit_move_insn (to
, target
);
4305 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4306 != CODE_FOR_nothing
)
4308 /* Make a place for a REG_NOTE and add it. */
4309 insn
= emit_move_insn (to
, to
);
4310 set_unique_reg_note (insn
,
4312 gen_rtx_fmt_e (UNSIGNED_FIX
,
4320 /* We can't do it with an insn, so use a library call. But first ensure
4321 that the mode of TO is at least as wide as SImode, since those are the
4322 only library calls we know about. */
4324 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4326 target
= gen_reg_rtx (SImode
);
4328 expand_fix (target
, from
, unsignedp
);
4336 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4337 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4342 from
= force_not_mem (from
);
4346 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4347 GET_MODE (to
), 1, from
,
4349 insns
= get_insns ();
4352 emit_libcall_block (insns
, target
, value
,
4353 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4354 GET_MODE (to
), from
));
4359 if (GET_MODE (to
) == GET_MODE (target
))
4360 emit_move_insn (to
, target
);
4362 convert_move (to
, target
, 0);
4366 /* Report whether we have an instruction to perform the operation
4367 specified by CODE on operands of mode MODE. */
4369 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4371 return (code_to_optab
[(int) code
] != 0
4372 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4373 != CODE_FOR_nothing
));
4376 /* Create a blank optab. */
4381 optab op
= ggc_alloc (sizeof (struct optab
));
4382 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4384 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4385 op
->handlers
[i
].libfunc
= 0;
4391 static convert_optab
4392 new_convert_optab (void)
4395 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4396 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4397 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4399 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4400 op
->handlers
[i
][j
].libfunc
= 0;
4405 /* Same, but fill in its code as CODE, and write it into the
4406 code_to_optab table. */
4408 init_optab (enum rtx_code code
)
4410 optab op
= new_optab ();
4412 code_to_optab
[(int) code
] = op
;
4416 /* Same, but fill in its code as CODE, and do _not_ write it into
4417 the code_to_optab table. */
4419 init_optabv (enum rtx_code code
)
4421 optab op
= new_optab ();
4426 /* Conversion optabs never go in the code_to_optab table. */
4427 static inline convert_optab
4428 init_convert_optab (enum rtx_code code
)
4430 convert_optab op
= new_convert_optab ();
4435 /* Initialize the libfunc fields of an entire group of entries in some
4436 optab. Each entry is set equal to a string consisting of a leading
4437 pair of underscores followed by a generic operation name followed by
4438 a mode name (downshifted to lowercase) followed by a single character
4439 representing the number of operands for the given operation (which is
4440 usually one of the characters '2', '3', or '4').
4442 OPTABLE is the table in which libfunc fields are to be initialized.
4443 FIRST_MODE is the first machine mode index in the given optab to
4445 LAST_MODE is the last machine mode index in the given optab to
4447 OPNAME is the generic (string) name of the operation.
4448 SUFFIX is the character which specifies the number of operands for
4449 the given generic operation.
4453 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4454 const char *opname
, int suffix
)
4457 unsigned opname_len
= strlen (opname
);
4459 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4460 mode
= (enum machine_mode
) ((int) mode
+ 1))
4462 const char *mname
= GET_MODE_NAME (mode
);
4463 unsigned mname_len
= strlen (mname
);
4464 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4471 for (q
= opname
; *q
; )
4473 for (q
= mname
; *q
; q
++)
4474 *p
++ = TOLOWER (*q
);
4478 optable
->handlers
[(int) mode
].libfunc
4479 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4483 /* Initialize the libfunc fields of an entire group of entries in some
4484 optab which correspond to all integer mode operations. The parameters
4485 have the same meaning as similarly named ones for the `init_libfuncs'
4486 routine. (See above). */
4489 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4491 int maxsize
= 2*BITS_PER_WORD
;
4492 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4493 maxsize
= LONG_LONG_TYPE_SIZE
;
4494 init_libfuncs (optable
, word_mode
,
4495 mode_for_size (maxsize
, MODE_INT
, 0),
4499 /* Initialize the libfunc fields of an entire group of entries in some
4500 optab which correspond to all real mode operations. The parameters
4501 have the same meaning as similarly named ones for the `init_libfuncs'
4502 routine. (See above). */
4505 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4507 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4510 /* Initialize the libfunc fields of an entire group of entries of an
4511 inter-mode-class conversion optab. The string formation rules are
4512 similar to the ones for init_libfuncs, above, but instead of having
4513 a mode name and an operand count these functions have two mode names
4514 and no operand count. */
4516 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4517 enum mode_class from_class
,
4518 enum mode_class to_class
)
4520 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4521 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4522 size_t opname_len
= strlen (opname
);
4523 size_t max_mname_len
= 0;
4525 enum machine_mode fmode
, tmode
;
4526 const char *fname
, *tname
;
4528 char *libfunc_name
, *suffix
;
4531 for (fmode
= first_from_mode
;
4533 fmode
= GET_MODE_WIDER_MODE (fmode
))
4534 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4536 for (tmode
= first_to_mode
;
4538 tmode
= GET_MODE_WIDER_MODE (tmode
))
4539 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4541 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4542 libfunc_name
[0] = '_';
4543 libfunc_name
[1] = '_';
4544 memcpy (&libfunc_name
[2], opname
, opname_len
);
4545 suffix
= libfunc_name
+ opname_len
+ 2;
4547 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4548 fmode
= GET_MODE_WIDER_MODE (fmode
))
4549 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4550 tmode
= GET_MODE_WIDER_MODE (tmode
))
4552 fname
= GET_MODE_NAME (fmode
);
4553 tname
= GET_MODE_NAME (tmode
);
4556 for (q
= fname
; *q
; p
++, q
++)
4558 for (q
= tname
; *q
; p
++, q
++)
4563 tab
->handlers
[tmode
][fmode
].libfunc
4564 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4569 /* Initialize the libfunc fields of an entire group of entries of an
4570 intra-mode-class conversion optab. The string formation rules are
4571 similar to the ones for init_libfunc, above. WIDENING says whether
4572 the optab goes from narrow to wide modes or vice versa. These functions
4573 have two mode names _and_ an operand count. */
4575 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4576 enum mode_class
class, bool widening
)
4578 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4579 size_t opname_len
= strlen (opname
);
4580 size_t max_mname_len
= 0;
4582 enum machine_mode nmode
, wmode
;
4583 const char *nname
, *wname
;
4585 char *libfunc_name
, *suffix
;
4588 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4589 nmode
= GET_MODE_WIDER_MODE (nmode
))
4590 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4592 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4593 libfunc_name
[0] = '_';
4594 libfunc_name
[1] = '_';
4595 memcpy (&libfunc_name
[2], opname
, opname_len
);
4596 suffix
= libfunc_name
+ opname_len
+ 2;
4598 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4599 nmode
= GET_MODE_WIDER_MODE (nmode
))
4600 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4601 wmode
= GET_MODE_WIDER_MODE (wmode
))
4603 nname
= GET_MODE_NAME (nmode
);
4604 wname
= GET_MODE_NAME (wmode
);
4607 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4609 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4615 tab
->handlers
[widening
? wmode
: nmode
]
4616 [widening
? nmode
: wmode
].libfunc
4617 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4624 init_one_libfunc (const char *name
)
4628 /* Create a FUNCTION_DECL that can be passed to
4629 targetm.encode_section_info. */
4630 /* ??? We don't have any type information except for this is
4631 a function. Pretend this is "int foo()". */
4632 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4633 build_function_type (integer_type_node
, NULL_TREE
));
4634 DECL_ARTIFICIAL (decl
) = 1;
4635 DECL_EXTERNAL (decl
) = 1;
4636 TREE_PUBLIC (decl
) = 1;
4638 symbol
= XEXP (DECL_RTL (decl
), 0);
4640 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4641 are the flags assigned by targetm.encode_section_info. */
4642 SYMBOL_REF_DECL (symbol
) = 0;
4647 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4648 MODE to NAME, which should be either 0 or a string constant. */
4650 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4653 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4655 optable
->handlers
[mode
].libfunc
= 0;
4658 /* Call this to reset the function entry for one conversion optab
4659 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4660 either 0 or a string constant. */
4662 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4663 enum machine_mode fmode
, const char *name
)
4666 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4668 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4671 /* Call this once to initialize the contents of the optabs
4672 appropriately for the current target machine. */
4679 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4681 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4682 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4684 #ifdef HAVE_conditional_move
4685 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4686 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4689 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4691 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4692 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4695 add_optab
= init_optab (PLUS
);
4696 addv_optab
= init_optabv (PLUS
);
4697 sub_optab
= init_optab (MINUS
);
4698 subv_optab
= init_optabv (MINUS
);
4699 smul_optab
= init_optab (MULT
);
4700 smulv_optab
= init_optabv (MULT
);
4701 smul_highpart_optab
= init_optab (UNKNOWN
);
4702 umul_highpart_optab
= init_optab (UNKNOWN
);
4703 smul_widen_optab
= init_optab (UNKNOWN
);
4704 umul_widen_optab
= init_optab (UNKNOWN
);
4705 sdiv_optab
= init_optab (DIV
);
4706 sdivv_optab
= init_optabv (DIV
);
4707 sdivmod_optab
= init_optab (UNKNOWN
);
4708 udiv_optab
= init_optab (UDIV
);
4709 udivmod_optab
= init_optab (UNKNOWN
);
4710 smod_optab
= init_optab (MOD
);
4711 umod_optab
= init_optab (UMOD
);
4712 fmod_optab
= init_optab (UNKNOWN
);
4713 drem_optab
= init_optab (UNKNOWN
);
4714 ftrunc_optab
= init_optab (UNKNOWN
);
4715 and_optab
= init_optab (AND
);
4716 ior_optab
= init_optab (IOR
);
4717 xor_optab
= init_optab (XOR
);
4718 ashl_optab
= init_optab (ASHIFT
);
4719 ashr_optab
= init_optab (ASHIFTRT
);
4720 lshr_optab
= init_optab (LSHIFTRT
);
4721 rotl_optab
= init_optab (ROTATE
);
4722 rotr_optab
= init_optab (ROTATERT
);
4723 smin_optab
= init_optab (SMIN
);
4724 smax_optab
= init_optab (SMAX
);
4725 umin_optab
= init_optab (UMIN
);
4726 umax_optab
= init_optab (UMAX
);
4727 pow_optab
= init_optab (UNKNOWN
);
4728 atan2_optab
= init_optab (UNKNOWN
);
4730 /* These three have codes assigned exclusively for the sake of
4732 mov_optab
= init_optab (SET
);
4733 movstrict_optab
= init_optab (STRICT_LOW_PART
);
4734 cmp_optab
= init_optab (COMPARE
);
4736 ucmp_optab
= init_optab (UNKNOWN
);
4737 tst_optab
= init_optab (UNKNOWN
);
4739 eq_optab
= init_optab (EQ
);
4740 ne_optab
= init_optab (NE
);
4741 gt_optab
= init_optab (GT
);
4742 ge_optab
= init_optab (GE
);
4743 lt_optab
= init_optab (LT
);
4744 le_optab
= init_optab (LE
);
4745 unord_optab
= init_optab (UNORDERED
);
4747 neg_optab
= init_optab (NEG
);
4748 negv_optab
= init_optabv (NEG
);
4749 abs_optab
= init_optab (ABS
);
4750 absv_optab
= init_optabv (ABS
);
4751 addcc_optab
= init_optab (UNKNOWN
);
4752 one_cmpl_optab
= init_optab (NOT
);
4753 ffs_optab
= init_optab (FFS
);
4754 clz_optab
= init_optab (CLZ
);
4755 ctz_optab
= init_optab (CTZ
);
4756 popcount_optab
= init_optab (POPCOUNT
);
4757 parity_optab
= init_optab (PARITY
);
4758 sqrt_optab
= init_optab (SQRT
);
4759 floor_optab
= init_optab (UNKNOWN
);
4760 ceil_optab
= init_optab (UNKNOWN
);
4761 round_optab
= init_optab (UNKNOWN
);
4762 btrunc_optab
= init_optab (UNKNOWN
);
4763 nearbyint_optab
= init_optab (UNKNOWN
);
4764 rint_optab
= init_optab (UNKNOWN
);
4765 sincos_optab
= init_optab (UNKNOWN
);
4766 sin_optab
= init_optab (UNKNOWN
);
4767 asin_optab
= init_optab (UNKNOWN
);
4768 cos_optab
= init_optab (UNKNOWN
);
4769 acos_optab
= init_optab (UNKNOWN
);
4770 exp_optab
= init_optab (UNKNOWN
);
4771 exp10_optab
= init_optab (UNKNOWN
);
4772 exp2_optab
= init_optab (UNKNOWN
);
4773 expm1_optab
= init_optab (UNKNOWN
);
4774 logb_optab
= init_optab (UNKNOWN
);
4775 ilogb_optab
= init_optab (UNKNOWN
);
4776 log_optab
= init_optab (UNKNOWN
);
4777 log10_optab
= init_optab (UNKNOWN
);
4778 log2_optab
= init_optab (UNKNOWN
);
4779 log1p_optab
= init_optab (UNKNOWN
);
4780 tan_optab
= init_optab (UNKNOWN
);
4781 atan_optab
= init_optab (UNKNOWN
);
4782 strlen_optab
= init_optab (UNKNOWN
);
4783 cbranch_optab
= init_optab (UNKNOWN
);
4784 cmov_optab
= init_optab (UNKNOWN
);
4785 cstore_optab
= init_optab (UNKNOWN
);
4786 push_optab
= init_optab (UNKNOWN
);
4788 vec_extract_optab
= init_optab (UNKNOWN
);
4789 vec_set_optab
= init_optab (UNKNOWN
);
4790 vec_init_optab
= init_optab (UNKNOWN
);
4791 vec_realign_load_optab
= init_optab (UNKNOWN
);
4794 sext_optab
= init_convert_optab (SIGN_EXTEND
);
4795 zext_optab
= init_convert_optab (ZERO_EXTEND
);
4796 trunc_optab
= init_convert_optab (TRUNCATE
);
4797 sfix_optab
= init_convert_optab (FIX
);
4798 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
4799 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
4800 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
4801 sfloat_optab
= init_convert_optab (FLOAT
);
4802 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
4804 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4806 movmem_optab
[i
] = CODE_FOR_nothing
;
4807 clrmem_optab
[i
] = CODE_FOR_nothing
;
4808 cmpstr_optab
[i
] = CODE_FOR_nothing
;
4809 cmpmem_optab
[i
] = CODE_FOR_nothing
;
4811 #ifdef HAVE_SECONDARY_RELOADS
4812 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4816 /* Fill in the optabs with the insns we support. */
4819 /* Initialize the optabs with the names of the library functions. */
4820 init_integral_libfuncs (add_optab
, "add", '3');
4821 init_floating_libfuncs (add_optab
, "add", '3');
4822 init_integral_libfuncs (addv_optab
, "addv", '3');
4823 init_floating_libfuncs (addv_optab
, "add", '3');
4824 init_integral_libfuncs (sub_optab
, "sub", '3');
4825 init_floating_libfuncs (sub_optab
, "sub", '3');
4826 init_integral_libfuncs (subv_optab
, "subv", '3');
4827 init_floating_libfuncs (subv_optab
, "sub", '3');
4828 init_integral_libfuncs (smul_optab
, "mul", '3');
4829 init_floating_libfuncs (smul_optab
, "mul", '3');
4830 init_integral_libfuncs (smulv_optab
, "mulv", '3');
4831 init_floating_libfuncs (smulv_optab
, "mul", '3');
4832 init_integral_libfuncs (sdiv_optab
, "div", '3');
4833 init_floating_libfuncs (sdiv_optab
, "div", '3');
4834 init_integral_libfuncs (sdivv_optab
, "divv", '3');
4835 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4836 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4837 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4838 init_integral_libfuncs (smod_optab
, "mod", '3');
4839 init_integral_libfuncs (umod_optab
, "umod", '3');
4840 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4841 init_integral_libfuncs (and_optab
, "and", '3');
4842 init_integral_libfuncs (ior_optab
, "ior", '3');
4843 init_integral_libfuncs (xor_optab
, "xor", '3');
4844 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4845 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4846 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4847 init_integral_libfuncs (smin_optab
, "min", '3');
4848 init_floating_libfuncs (smin_optab
, "min", '3');
4849 init_integral_libfuncs (smax_optab
, "max", '3');
4850 init_floating_libfuncs (smax_optab
, "max", '3');
4851 init_integral_libfuncs (umin_optab
, "umin", '3');
4852 init_integral_libfuncs (umax_optab
, "umax", '3');
4853 init_integral_libfuncs (neg_optab
, "neg", '2');
4854 init_floating_libfuncs (neg_optab
, "neg", '2');
4855 init_integral_libfuncs (negv_optab
, "negv", '2');
4856 init_floating_libfuncs (negv_optab
, "neg", '2');
4857 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4858 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4859 init_integral_libfuncs (clz_optab
, "clz", '2');
4860 init_integral_libfuncs (ctz_optab
, "ctz", '2');
4861 init_integral_libfuncs (popcount_optab
, "popcount", '2');
4862 init_integral_libfuncs (parity_optab
, "parity", '2');
4864 /* Comparison libcalls for integers MUST come in pairs,
4866 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4867 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4868 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4870 /* EQ etc are floating point only. */
4871 init_floating_libfuncs (eq_optab
, "eq", '2');
4872 init_floating_libfuncs (ne_optab
, "ne", '2');
4873 init_floating_libfuncs (gt_optab
, "gt", '2');
4874 init_floating_libfuncs (ge_optab
, "ge", '2');
4875 init_floating_libfuncs (lt_optab
, "lt", '2');
4876 init_floating_libfuncs (le_optab
, "le", '2');
4877 init_floating_libfuncs (unord_optab
, "unord", '2');
4880 init_interclass_conv_libfuncs (sfloat_optab
, "float",
4881 MODE_INT
, MODE_FLOAT
);
4882 init_interclass_conv_libfuncs (sfix_optab
, "fix",
4883 MODE_FLOAT
, MODE_INT
);
4884 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
4885 MODE_FLOAT
, MODE_INT
);
4887 /* sext_optab is also used for FLOAT_EXTEND. */
4888 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
4889 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
4891 /* Use cabs for double complex abs, since systems generally have cabs.
4892 Don't define any libcall for float complex, so that cabs will be used. */
4893 if (complex_double_type_node
)
4894 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
4895 = init_one_libfunc ("cabs");
4897 /* The ffs function operates on `int'. */
4898 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
4899 = init_one_libfunc ("ffs");
4901 abort_libfunc
= init_one_libfunc ("abort");
4902 memcpy_libfunc
= init_one_libfunc ("memcpy");
4903 memmove_libfunc
= init_one_libfunc ("memmove");
4904 memcmp_libfunc
= init_one_libfunc ("memcmp");
4905 memset_libfunc
= init_one_libfunc ("memset");
4906 setbits_libfunc
= init_one_libfunc ("__setbits");
4908 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
4909 ? "_Unwind_SjLj_Resume"
4910 : "_Unwind_Resume");
4911 #ifndef DONT_USE_BUILTIN_SETJMP
4912 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
4913 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
4915 setjmp_libfunc
= init_one_libfunc ("setjmp");
4916 longjmp_libfunc
= init_one_libfunc ("longjmp");
4918 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
4919 unwind_sjlj_unregister_libfunc
4920 = init_one_libfunc ("_Unwind_SjLj_Unregister");
4922 /* For function entry/exit instrumentation. */
4923 profile_function_entry_libfunc
4924 = init_one_libfunc ("__cyg_profile_func_enter");
4925 profile_function_exit_libfunc
4926 = init_one_libfunc ("__cyg_profile_func_exit");
4928 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
4930 if (HAVE_conditional_trap
)
4931 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
4933 /* Allow the target to add more libcalls or rename some, etc. */
4934 targetm
.init_libfuncs ();
4939 /* Print information about the current contents of the optabs on
4943 debug_optab_libfuncs (void)
4949 /* Dump the arithmetic optabs. */
4950 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
4951 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
4954 struct optab_handlers
*h
;
4957 h
= &o
->handlers
[j
];
4960 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
4962 fprintf (stderr
, "%s\t%s:\t%s\n",
4963 GET_RTX_NAME (o
->code
),
4965 XSTR (h
->libfunc
, 0));
4969 /* Dump the conversion optabs. */
4970 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
4971 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
4972 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
4975 struct optab_handlers
*h
;
4977 o
= &convert_optab_table
[i
];
4978 h
= &o
->handlers
[j
][k
];
4981 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
4983 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
4984 GET_RTX_NAME (o
->code
),
4987 XSTR (h
->libfunc
, 0));
4995 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
4996 CODE. Return 0 on failure. */
4999 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5000 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5002 enum machine_mode mode
= GET_MODE (op1
);
5003 enum insn_code icode
;
5006 if (!HAVE_conditional_trap
)
5009 if (mode
== VOIDmode
)
5012 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5013 if (icode
== CODE_FOR_nothing
)
5017 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5018 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5024 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5026 PUT_CODE (trap_rtx
, code
);
5027 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5031 insn
= get_insns ();
5038 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5039 or unsigned operation code. */
5041 static enum rtx_code
5042 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5054 code
= unsignedp
? LTU
: LT
;
5057 code
= unsignedp
? LEU
: LE
;
5060 code
= unsignedp
? GTU
: GT
;
5063 code
= unsignedp
? GEU
: GE
;
5066 case UNORDERED_EXPR
:
5097 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5098 unsigned operators. Do not generate compare instruction. */
5101 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5103 enum rtx_code rcode
;
5105 rtx rtx_op0
, rtx_op1
;
5107 if (!COMPARISON_CLASS_P (cond
))
5109 /* This is unlikely. While generating VEC_COND_EXPR,
5110 auto vectorizer ensures that condition is a relational
5116 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5117 t_op0
= TREE_OPERAND (cond
, 0);
5118 t_op1
= TREE_OPERAND (cond
, 1);
5121 /* Expand operands. */
5122 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5123 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5125 if (!(*insn_data
[icode
].operand
[4].predicate
) (rtx_op0
, GET_MODE (rtx_op0
))
5126 && GET_MODE (rtx_op0
) != VOIDmode
)
5127 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5129 if (!(*insn_data
[icode
].operand
[5].predicate
) (rtx_op1
, GET_MODE (rtx_op1
))
5130 && GET_MODE (rtx_op1
) != VOIDmode
)
5131 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5133 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5136 /* Return insn code for VEC_COND_EXPR EXPR. */
5138 static inline enum insn_code
5139 get_vcond_icode (tree expr
, enum machine_mode mode
)
5141 enum insn_code icode
= CODE_FOR_nothing
;
5143 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5144 icode
= vcondu_gen_code
[mode
];
5146 icode
= vcond_gen_code
[mode
];
5150 /* Return TRUE iff, appropriate vector insns are available
5151 for vector cond expr expr in VMODE mode. */
5154 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5156 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5161 /* Generate insns for VEC_COND_EXPR. */
5164 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5166 enum insn_code icode
;
5167 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5168 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5169 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5171 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5172 if (icode
== CODE_FOR_nothing
)
5176 target
= gen_reg_rtx (mode
);
5178 /* Get comparison rtx. First expand both cond expr operands. */
5179 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5181 cc_op0
= XEXP (comparison
, 0);
5182 cc_op1
= XEXP (comparison
, 1);
5183 /* Expand both operands and force them in reg, if required. */
5184 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5185 NULL_RTX
, VOIDmode
, 1);
5186 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5187 && mode
!= VOIDmode
)
5188 rtx_op1
= force_reg (mode
, rtx_op1
);
5190 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5191 NULL_RTX
, VOIDmode
, 1);
5192 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5193 && mode
!= VOIDmode
)
5194 rtx_op2
= force_reg (mode
, rtx_op2
);
5196 /* Emit instruction! */
5197 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5198 comparison
, cc_op0
, cc_op1
));
5202 #include "gt-optabs.h"