/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
25 #include "insn-flags.h"
26 #include "insn-codes.h"
28 #include "insn-config.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
45 optab smul_highpart_optab
;
46 optab umul_highpart_optab
;
47 optab smul_widen_optab
;
48 optab umul_widen_optab
;
71 optab movstrict_optab
;
82 optab ucmp_optab
; /* Used only for libcalls for unsigned comparisons. */
87 /* Tables of patterns for extending one integer mode to another. */
88 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
90 /* Tables of patterns for converting between fixed and floating point. */
91 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
92 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
93 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
95 /* Contains the optab used for each rtx code. */
96 optab code_to_optab
[NUM_RTX_CODE
+ 1];
98 /* SYMBOL_REF rtx's for the library functions that are called
99 implicitly and not via optabs. */
101 rtx extendsfdf2_libfunc
;
102 rtx extendsfxf2_libfunc
;
103 rtx extendsftf2_libfunc
;
104 rtx extenddfxf2_libfunc
;
105 rtx extenddftf2_libfunc
;
107 rtx truncdfsf2_libfunc
;
108 rtx truncxfsf2_libfunc
;
109 rtx trunctfsf2_libfunc
;
110 rtx truncxfdf2_libfunc
;
111 rtx trunctfdf2_libfunc
;
155 rtx floatsisf_libfunc
;
156 rtx floatdisf_libfunc
;
157 rtx floattisf_libfunc
;
159 rtx floatsidf_libfunc
;
160 rtx floatdidf_libfunc
;
161 rtx floattidf_libfunc
;
163 rtx floatsixf_libfunc
;
164 rtx floatdixf_libfunc
;
165 rtx floattixf_libfunc
;
167 rtx floatsitf_libfunc
;
168 rtx floatditf_libfunc
;
169 rtx floattitf_libfunc
;
187 rtx fixunssfsi_libfunc
;
188 rtx fixunssfdi_libfunc
;
189 rtx fixunssfti_libfunc
;
191 rtx fixunsdfsi_libfunc
;
192 rtx fixunsdfdi_libfunc
;
193 rtx fixunsdfti_libfunc
;
195 rtx fixunsxfsi_libfunc
;
196 rtx fixunsxfdi_libfunc
;
197 rtx fixunsxfti_libfunc
;
199 rtx fixunstfsi_libfunc
;
200 rtx fixunstfdi_libfunc
;
201 rtx fixunstfti_libfunc
;
203 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
204 gives the gen_function to make a branch to test that condition. */
206 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
208 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
209 gives the insn code to make a store-condition insn
210 to test that condition. */
212 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
214 #ifdef HAVE_conditional_move
215 /* Indexed by the machine mode, gives the insn code to make a conditional
216 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
217 setcc_gen_code to cut down on the number of named patterns. Consider a day
218 when a lot more rtx codes are conditional (eg: for the ARM). */
220 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
223 static int add_equal_note
PROTO((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
224 static rtx widen_operand
PROTO((rtx
, enum machine_mode
,
225 enum machine_mode
, int, int));
/* Forward declarations of the fixed/float conversion query helpers.
   NOTE(review): the tails of both prototypes (the remaining arguments
   and the closing "));") were lost when this copy of the file was
   mangled -- restore them from the original source before compiling.  */
226 static enum insn_code can_fix_p
PROTO((enum machine_mode
, enum machine_mode
,
228 static enum insn_code can_float_p
PROTO((enum machine_mode
, enum machine_mode
,
230 static rtx ftruncify
PROTO((rtx
));
231 static optab init_optab
PROTO((enum rtx_code
));
232 static void init_libfuncs
PROTO((optab
, int, int, char *, int));
233 static void init_integral_libfuncs
PROTO((optab
, char *, int));
234 static void init_floating_libfuncs
PROTO((optab
, char *, int));
235 static void init_complex_libfuncs
PROTO((optab
, char *, int));
237 /* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
238 the result of operation CODE applied to OP0 (and OP1 if it is a binary
241 If the last insn does not set TARGET, don't do anything, but return 1.
243 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
244 don't add the REG_EQUAL note but return 0. Our caller can then try
245 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this copy of the file is missing lines of the function
   (the "static int" return type, the parameter declarations, the local
   declarations of SET/NOTE/I, and the early "return" statements inside
   the guards below).  Restore from the original source; only comments
   are added here.  */
248 add_equal_note (seq
, target
, code
, op0
, op1
)
/* Bail out (presumably returning 1 -- the dropped statement) unless:
   CODE is a unary/binary/commutative/comparison rtx class, SEQ is a
   SEQUENCE whose last insn is a single_set, and that set's destination
   is TARGET (or a STRICT_LOW_PART of it).  */
258 if ((GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
259 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
260 || GET_CODE (seq
) != SEQUENCE
261 || (set
= single_set (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))) == 0
262 || GET_CODE (target
) == ZERO_EXTRACT
263 || (! rtx_equal_p (SET_DEST (set
), target
)
264 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
266 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
267 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
271 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
272 besides the last insn. */
273 if (reg_overlap_mentioned_p (target
, op0
)
274 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan every insn of SEQ except the last; a hit here means the note
   would be wrong (the dropped body presumably returns 0).  */
275 for (i
= XVECLEN (seq
, 0) - 2; i
>= 0; i
--)
276 if (reg_set_p (target
, XVECEXP (seq
, 0, i
)))
/* Build the equivalent-value expression: unary ops take one operand,
   everything else two.  Operands are copied so the note does not share
   structure with the insns.  */
279 if (GET_RTX_CLASS (code
) == '1')
280 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
));
282 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
/* Prepend the REG_EQUAL note onto the last insn of SEQ.  */
284 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))
285 = gen_rtx (EXPR_LIST
, REG_EQUAL
, note
,
286 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1)));
291 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
292 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
293 not actually do a sign-extend or zero-extend, but can leave the
294 higher-order bits of the result rtx undefined, for example, in the case
295 of logical operations, but not right shifts. */
/* NOTE(review): this copy of the file is missing lines of the function
   (the "static rtx" return type, declarations of OP/UNSIGNEDP/NO_EXTEND
   and the local RESULT, the first leg of the extend condition below, and
   the final "return result;").  Restore from the original source; only
   comments are added here.  */
298 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
300 enum machine_mode mode
, oldmode
;
306 /* If we must extend do so. If OP is either a constant or a SUBREG
307 for a promoted object, also extend since it will be more efficient to
310 || GET_MODE (op
) == VOIDmode
311 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)))
/* Full extension: let convert_modes do the sign/zero extend.  */
312 return convert_modes (mode
, oldmode
, op
, unsignedp
);
314 /* If MODE is no wider than a single word, we return a paradoxical
/* ... SUBREG of OP forced into a register: the high bits are left
   undefined, which NO_EXTEND permits.  */
316 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
317 return gen_rtx (SUBREG
, mode
, force_reg (GET_MODE (op
), op
), 0);
319 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* ... part to OP.  The CLOBBER tells flow analysis the whole register
   is being written even though only the low part gets a defined value.  */
322 result
= gen_reg_rtx (mode
);
323 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, result
));
324 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
328 /* Generate code to perform an operation specified by BINOPTAB
329 on operands OP0 and OP1, with result having machine-mode MODE.
331 UNSIGNEDP is for the case where we have to widen the operands
332 to perform the operation. It says to use zero-extension.
334 If TARGET is nonzero, the value
335 is generated there, if it is convenient to do so.
336 In all cases an rtx is returned for the locus of the value;
337 this may or may not be TARGET. */
340 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
341 enum machine_mode mode
;
346 enum optab_methods methods
;
348 enum optab_methods next_methods
349 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
350 ? OPTAB_WIDEN
: methods
);
351 enum mode_class
class;
352 enum machine_mode wider_mode
;
354 int commutative_op
= 0;
355 int shift_op
= (binoptab
->code
== ASHIFT
356 || binoptab
->code
== ASHIFTRT
357 || binoptab
->code
== LSHIFTRT
358 || binoptab
->code
== ROTATE
359 || binoptab
->code
== ROTATERT
);
360 rtx entry_last
= get_last_insn ();
363 class = GET_MODE_CLASS (mode
);
365 op0
= protect_from_queue (op0
, 0);
366 op1
= protect_from_queue (op1
, 0);
368 target
= protect_from_queue (target
, 1);
372 op0
= force_not_mem (op0
);
373 op1
= force_not_mem (op1
);
376 /* If subtracting an integer constant, convert this into an addition of
377 the negated constant. */
379 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
381 op1
= negate_rtx (mode
, op1
);
382 binoptab
= add_optab
;
385 /* If we are inside an appropriately-short loop and one operand is an
386 expensive constant, force it into a register. */
387 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
388 && rtx_cost (op0
, binoptab
->code
) > 2)
389 op0
= force_reg (mode
, op0
);
391 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
392 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > 2)
393 op1
= force_reg (mode
, op1
);
395 /* Record where to delete back to if we backtrack. */
396 last
= get_last_insn ();
398 /* If operation is commutative,
399 try to make the first operand a register.
400 Even better, try to make it the same as the target.
401 Also try to make the last operand a constant. */
402 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
403 || binoptab
== smul_widen_optab
404 || binoptab
== umul_widen_optab
405 || binoptab
== smul_highpart_optab
406 || binoptab
== umul_highpart_optab
)
410 if (((target
== 0 || GET_CODE (target
) == REG
)
411 ? ((GET_CODE (op1
) == REG
412 && GET_CODE (op0
) != REG
)
414 : rtx_equal_p (op1
, target
))
415 || GET_CODE (op0
) == CONST_INT
)
423 /* If we can do it with a three-operand insn, do so. */
425 if (methods
!= OPTAB_MUST_WIDEN
426 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
428 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
429 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
430 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
432 rtx xop0
= op0
, xop1
= op1
;
437 temp
= gen_reg_rtx (mode
);
439 /* If it is a commutative operator and the modes would match
440 if we would swap the operands, we can save the conversions. */
443 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
444 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
448 tmp
= op0
; op0
= op1
; op1
= tmp
;
449 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
453 /* In case the insn wants input operands in modes different from
454 the result, convert the operands. */
456 if (GET_MODE (op0
) != VOIDmode
457 && GET_MODE (op0
) != mode0
458 && mode0
!= VOIDmode
)
459 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
461 if (GET_MODE (xop1
) != VOIDmode
462 && GET_MODE (xop1
) != mode1
463 && mode1
!= VOIDmode
)
464 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
466 /* Now, if insn's predicates don't allow our operands, put them into
469 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
)
470 && mode0
!= VOIDmode
)
471 xop0
= copy_to_mode_reg (mode0
, xop0
);
473 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
)
474 && mode1
!= VOIDmode
)
475 xop1
= copy_to_mode_reg (mode1
, xop1
);
477 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
478 temp
= gen_reg_rtx (mode
);
480 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
483 /* If PAT is a multi-insn sequence, try to add an appropriate
484 REG_EQUAL note to it. If we can't because TEMP conflicts with an
485 operand, call ourselves again, this time without a target. */
486 if (GET_CODE (pat
) == SEQUENCE
487 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
489 delete_insns_since (last
);
490 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
498 delete_insns_since (last
);
501 /* If this is a multiply, see if we can do a widening operation that
502 takes operands of this mode and makes a wider mode. */
504 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
505 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
506 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
507 != CODE_FOR_nothing
))
509 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
510 unsignedp
? umul_widen_optab
: smul_widen_optab
,
511 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
515 if (GET_MODE_CLASS (mode
) == MODE_INT
)
516 return gen_lowpart (mode
, temp
);
518 return convert_to_mode (mode
, temp
, unsignedp
);
522 /* Look for a wider mode of the same class for which we think we
523 can open-code the operation. Check for a widening multiply at the
524 wider mode as well. */
526 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
527 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
528 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
529 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
531 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
532 || (binoptab
== smul_optab
533 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
534 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
535 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
536 != CODE_FOR_nothing
)))
538 rtx xop0
= op0
, xop1
= op1
;
541 /* For certain integer operations, we need not actually extend
542 the narrow operands, as long as we will truncate
543 the results to the same narrowness. */
545 if ((binoptab
== ior_optab
|| binoptab
== and_optab
546 || binoptab
== xor_optab
547 || binoptab
== add_optab
|| binoptab
== sub_optab
548 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
549 && class == MODE_INT
)
552 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
554 /* The second operand of a shift must always be extended. */
555 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
556 no_extend
&& binoptab
!= ashl_optab
);
558 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
559 unsignedp
, OPTAB_DIRECT
);
562 if (class != MODE_INT
)
565 target
= gen_reg_rtx (mode
);
566 convert_move (target
, temp
, 0);
570 return gen_lowpart (mode
, temp
);
573 delete_insns_since (last
);
577 /* These can be done a word at a time. */
578 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
580 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
581 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
587 /* If TARGET is the same as one of the operands, the REG_EQUAL note
588 won't be accurate, so use a new target. */
589 if (target
== 0 || target
== op0
|| target
== op1
)
590 target
= gen_reg_rtx (mode
);
594 /* Do the actual arithmetic. */
595 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
597 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
598 rtx x
= expand_binop (word_mode
, binoptab
,
599 operand_subword_force (op0
, i
, mode
),
600 operand_subword_force (op1
, i
, mode
),
601 target_piece
, unsignedp
, next_methods
);
606 if (target_piece
!= x
)
607 emit_move_insn (target_piece
, x
);
610 insns
= get_insns ();
613 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
615 if (binoptab
->code
!= UNKNOWN
)
617 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
621 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
626 /* Synthesize double word shifts from single word shifts. */
627 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
628 || binoptab
== ashr_optab
)
630 && GET_CODE (op1
) == CONST_INT
631 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
632 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
633 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
634 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
636 rtx insns
, inter
, equiv_value
;
637 rtx into_target
, outof_target
;
638 rtx into_input
, outof_input
;
639 int shift_count
, left_shift
, outof_word
;
641 /* If TARGET is the same as one of the operands, the REG_EQUAL note
642 won't be accurate, so use a new target. */
643 if (target
== 0 || target
== op0
|| target
== op1
)
644 target
= gen_reg_rtx (mode
);
648 shift_count
= INTVAL (op1
);
650 /* OUTOF_* is the word we are shifting bits away from, and
651 INTO_* is the word that we are shifting bits towards, thus
652 they differ depending on the direction of the shift and
655 left_shift
= binoptab
== ashl_optab
;
656 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
658 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
659 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
661 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
662 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
664 if (shift_count
>= BITS_PER_WORD
)
666 inter
= expand_binop (word_mode
, binoptab
,
668 GEN_INT (shift_count
- BITS_PER_WORD
),
669 into_target
, unsignedp
, next_methods
);
671 if (inter
!= 0 && inter
!= into_target
)
672 emit_move_insn (into_target
, inter
);
674 /* For a signed right shift, we must fill the word we are shifting
675 out of with copies of the sign bit. Otherwise it is zeroed. */
676 if (inter
!= 0 && binoptab
!= ashr_optab
)
677 inter
= CONST0_RTX (word_mode
);
679 inter
= expand_binop (word_mode
, binoptab
,
681 GEN_INT (BITS_PER_WORD
- 1),
682 outof_target
, unsignedp
, next_methods
);
684 if (inter
!= 0 && inter
!= outof_target
)
685 emit_move_insn (outof_target
, inter
);
690 optab reverse_unsigned_shift
, unsigned_shift
;
692 /* For a shift of less then BITS_PER_WORD, to compute the carry,
693 we must do a logical shift in the opposite direction of the
696 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
698 /* For a shift of less than BITS_PER_WORD, to compute the word
699 shifted towards, we need to unsigned shift the orig value of
702 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
704 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
706 GEN_INT (BITS_PER_WORD
- shift_count
),
707 0, unsignedp
, next_methods
);
712 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
713 op1
, 0, unsignedp
, next_methods
);
716 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
717 into_target
, unsignedp
, next_methods
);
719 if (inter
!= 0 && inter
!= into_target
)
720 emit_move_insn (into_target
, inter
);
723 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
724 op1
, outof_target
, unsignedp
, next_methods
);
726 if (inter
!= 0 && inter
!= outof_target
)
727 emit_move_insn (outof_target
, inter
);
730 insns
= get_insns ();
735 if (binoptab
->code
!= UNKNOWN
)
736 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
740 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
745 /* Synthesize double word rotates from single word shifts. */
746 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
748 && GET_CODE (op1
) == CONST_INT
749 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
750 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
751 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
753 rtx insns
, equiv_value
;
754 rtx into_target
, outof_target
;
755 rtx into_input
, outof_input
;
757 int shift_count
, left_shift
, outof_word
;
759 /* If TARGET is the same as one of the operands, the REG_EQUAL note
760 won't be accurate, so use a new target. */
761 if (target
== 0 || target
== op0
|| target
== op1
)
762 target
= gen_reg_rtx (mode
);
766 shift_count
= INTVAL (op1
);
768 /* OUTOF_* is the word we are shifting bits away from, and
769 INTO_* is the word that we are shifting bits towards, thus
770 they differ depending on the direction of the shift and
773 left_shift
= (binoptab
== rotl_optab
);
774 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
776 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
777 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
779 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
780 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
782 if (shift_count
== BITS_PER_WORD
)
784 /* This is just a word swap. */
785 emit_move_insn (outof_target
, into_input
);
786 emit_move_insn (into_target
, outof_input
);
791 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
792 rtx first_shift_count
, second_shift_count
;
793 optab reverse_unsigned_shift
, unsigned_shift
;
795 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
796 ? lshr_optab
: ashl_optab
);
798 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
799 ? ashl_optab
: lshr_optab
);
801 if (shift_count
> BITS_PER_WORD
)
803 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
804 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
808 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
809 second_shift_count
= GEN_INT (shift_count
);
812 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
813 outof_input
, first_shift_count
,
814 NULL_RTX
, unsignedp
, next_methods
);
815 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
816 into_input
, second_shift_count
,
817 into_target
, unsignedp
, next_methods
);
819 if (into_temp1
!= 0 && into_temp2
!= 0)
820 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
821 into_target
, unsignedp
, next_methods
);
825 if (inter
!= 0 && inter
!= into_target
)
826 emit_move_insn (into_target
, inter
);
828 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
829 into_input
, first_shift_count
,
830 NULL_RTX
, unsignedp
, next_methods
);
831 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
832 outof_input
, second_shift_count
,
833 outof_target
, unsignedp
, next_methods
);
835 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
836 inter
= expand_binop (word_mode
, ior_optab
,
837 outof_temp1
, outof_temp2
,
838 outof_target
, unsignedp
, next_methods
);
840 if (inter
!= 0 && inter
!= outof_target
)
841 emit_move_insn (outof_target
, inter
);
844 insns
= get_insns ();
849 if (binoptab
->code
!= UNKNOWN
)
850 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
854 /* We can't make this a no conflict block if this is a word swap,
855 because the word swap case fails if the input and output values
856 are in the same register. */
857 if (shift_count
!= BITS_PER_WORD
)
858 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
867 /* These can be done a word at a time by propagating carries. */
868 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
870 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
871 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
874 rtx carry_tmp
= gen_reg_rtx (word_mode
);
875 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
876 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
877 rtx carry_in
, carry_out
;
880 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
881 value is one of those, use it. Otherwise, use 1 since it is the
882 one easiest to get. */
883 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
884 int normalizep
= STORE_FLAG_VALUE
;
889 /* Prepare the operands. */
890 xop0
= force_reg (mode
, op0
);
891 xop1
= force_reg (mode
, op1
);
893 if (target
== 0 || GET_CODE (target
) != REG
894 || target
== xop0
|| target
== xop1
)
895 target
= gen_reg_rtx (mode
);
897 /* Indicate for flow that the entire target reg is being set. */
898 if (GET_CODE (target
) == REG
)
899 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
901 /* Do the actual arithmetic. */
902 for (i
= 0; i
< nwords
; i
++)
904 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
905 rtx target_piece
= operand_subword (target
, index
, 1, mode
);
906 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
907 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
910 /* Main add/subtract of the input operands. */
911 x
= expand_binop (word_mode
, binoptab
,
912 op0_piece
, op1_piece
,
913 target_piece
, unsignedp
, next_methods
);
919 /* Store carry from main add/subtract. */
920 carry_out
= gen_reg_rtx (word_mode
);
921 carry_out
= emit_store_flag (carry_out
,
922 binoptab
== add_optab
? LTU
: GTU
,
924 word_mode
, 1, normalizep
);
931 /* Add/subtract previous carry to main result. */
932 x
= expand_binop (word_mode
,
933 normalizep
== 1 ? binoptab
: otheroptab
,
935 target_piece
, 1, next_methods
);
938 else if (target_piece
!= x
)
939 emit_move_insn (target_piece
, x
);
943 /* THIS CODE HAS NOT BEEN TESTED. */
944 /* Get out carry from adding/subtracting carry in. */
945 carry_tmp
= emit_store_flag (carry_tmp
,
946 binoptab
== add_optab
949 word_mode
, 1, normalizep
);
951 /* Logical-ior the two poss. carry together. */
952 carry_out
= expand_binop (word_mode
, ior_optab
,
953 carry_out
, carry_tmp
,
954 carry_out
, 0, next_methods
);
960 carry_in
= carry_out
;
963 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
965 rtx temp
= emit_move_insn (target
, target
);
967 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
968 gen_rtx (binoptab
->code
, mode
,
975 delete_insns_since (last
);
978 /* If we want to multiply two two-word values and have normal and widening
979 multiplies of single-word values, we can do this with three smaller
980 multiplications. Note that we do not make a REG_NO_CONFLICT block here
981 because we are not operating on one word at a time.
983 The multiplication proceeds as follows:
984 _______________________
985 [__op0_high_|__op0_low__]
986 _______________________
987 * [__op1_high_|__op1_low__]
988 _______________________________________________
989 _______________________
990 (1) [__op0_low__*__op1_low__]
991 _______________________
992 (2a) [__op0_low__*__op1_high_]
993 _______________________
994 (2b) [__op0_high_*__op1_low__]
995 _______________________
996 (3) [__op0_high_*__op1_high_]
999 This gives a 4-word result. Since we are only interested in the
1000 lower 2 words, partial result (3) and the upper words of (2a) and
1001 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1002 calculated using non-widening multiplication.
1004 (1), however, needs to be calculated with an unsigned widening
1005 multiplication. If this operation is not directly supported we
1006 try using a signed widening multiplication and adjust the result.
1007 This adjustment works as follows:
1009 If both operands are positive then no adjustment is needed.
1011 If the operands have different signs, for example op0_low < 0 and
1012 op1_low >= 0, the instruction treats the most significant bit of
1013 op0_low as a sign bit instead of a bit with significance
1014 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1015 with 2**BITS_PER_WORD - op0_low, and two's complements the
1016 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1019 Similarly, if both operands are negative, we need to add
1020 (op0_low + op1_low) * 2**BITS_PER_WORD.
1022 We use a trick to adjust quickly. We logically shift op0_low right
1023 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1024 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1025 logical shift exists, we do an arithmetic right shift and subtract
1028 if (binoptab
== smul_optab
1029 && class == MODE_INT
1030 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1031 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1032 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1033 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1034 != CODE_FOR_nothing
)
1035 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1036 != CODE_FOR_nothing
)))
1038 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1039 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1040 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1041 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1042 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1043 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1048 /* If the target is the same as one of the inputs, don't use it. This
1049 prevents problems with the REG_EQUAL note. */
1050 if (target
== op0
|| target
== op1
)
1053 /* Multiply the two lower words to get a double-word product.
1054 If unsigned widening multiplication is available, use that;
1055 otherwise use the signed form and compensate. */
1057 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1059 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1060 target
, 1, OPTAB_DIRECT
);
1062 /* If we didn't succeed, delete everything we did so far. */
1064 delete_insns_since (last
);
1066 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1070 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1071 != CODE_FOR_nothing
)
1073 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1074 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1075 target
, 1, OPTAB_DIRECT
);
1076 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1077 NULL_RTX
, 1, next_methods
);
1079 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1080 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1083 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1084 NULL_RTX
, 0, next_methods
);
1086 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1087 op0_xhigh
, op0_xhigh
, 0,
1091 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1092 NULL_RTX
, 1, next_methods
);
1094 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1095 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1098 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1099 NULL_RTX
, 0, next_methods
);
1101 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1102 op1_xhigh
, op1_xhigh
, 0,
1107 /* If we have been able to directly compute the product of the
1108 low-order words of the operands and perform any required adjustments
1109 of the operands, we proceed by trying two more multiplications
1110 and then computing the appropriate sum.
1112 We have checked above that the required addition is provided.
1113 Full-word addition will normally always succeed, especially if
1114 it is provided at all, so we don't worry about its failure. The
1115 multiplication may well fail, however, so we do handle that. */
1117 if (product
&& op0_xhigh
&& op1_xhigh
)
1119 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1120 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1121 NULL_RTX
, 0, OPTAB_DIRECT
);
1124 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1125 product_high
, 0, next_methods
);
1127 if (temp
!= 0 && temp
!= product_high
)
1128 emit_move_insn (product_high
, temp
);
1131 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1132 NULL_RTX
, 0, OPTAB_DIRECT
);
1135 temp
= expand_binop (word_mode
, add_optab
, temp
,
1136 product_high
, product_high
,
1139 if (temp
!= 0 && temp
!= product_high
)
1140 emit_move_insn (product_high
, temp
);
1144 temp
= emit_move_insn (product
, product
);
1145 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
1146 gen_rtx (MULT
, mode
, copy_rtx (op0
),
1154 /* If we get here, we couldn't do it for some reason even though we
1155 originally thought we could. Delete anything we've emitted in
1158 delete_insns_since (last
);
1161 /* We need to open-code the complex type operations: '+, -, * and /' */
1163 /* At this point we allow operations between two similar complex
1164 numbers, and also if one of the operands is not a complex number
1165 but rather of MODE_FLOAT or MODE_INT. However, the caller
1166 must make sure that the MODE of the non-complex operand matches
1167 the SUBMODE of the complex operand. */
1169 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1171 rtx real0
= 0, imag0
= 0;
1172 rtx real1
= 0, imag1
= 0;
1173 rtx realr
, imagr
, res
;
1178 /* Find the correct mode for the real and imaginary parts */
1179 enum machine_mode submode
1180 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1181 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1184 if (submode
== BLKmode
)
1188 target
= gen_reg_rtx (mode
);
1192 realr
= gen_realpart (submode
, target
);
1193 imagr
= gen_imagpart (submode
, target
);
1195 if (GET_MODE (op0
) == mode
)
1197 real0
= gen_realpart (submode
, op0
);
1198 imag0
= gen_imagpart (submode
, op0
);
1203 if (GET_MODE (op1
) == mode
)
1205 real1
= gen_realpart (submode
, op1
);
1206 imag1
= gen_imagpart (submode
, op1
);
1211 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0|| imag1
!= 0))
1214 switch (binoptab
->code
)
1217 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1219 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1220 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1221 realr
, unsignedp
, methods
);
1225 else if (res
!= realr
)
1226 emit_move_insn (realr
, res
);
1229 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1230 imagr
, unsignedp
, methods
);
1233 else if (binoptab
->code
== MINUS
)
1234 res
= expand_unop (submode
, neg_optab
, imag1
, imagr
, unsignedp
);
1240 else if (res
!= imagr
)
1241 emit_move_insn (imagr
, res
);
1247 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1253 /* Don't fetch these from memory more than once. */
1254 real0
= force_reg (submode
, real0
);
1255 real1
= force_reg (submode
, real1
);
1256 imag0
= force_reg (submode
, imag0
);
1257 imag1
= force_reg (submode
, imag1
);
1259 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1260 unsignedp
, methods
);
1262 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1263 unsignedp
, methods
);
1265 if (temp1
== 0 || temp2
== 0)
1268 res
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1269 realr
, unsignedp
, methods
);
1273 else if (res
!= realr
)
1274 emit_move_insn (realr
, res
);
1276 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1277 NULL_RTX
, unsignedp
, methods
);
1279 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1280 NULL_RTX
, unsignedp
, methods
);
1282 if (temp1
== 0 || temp2
== 0)
1285 res
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1286 imagr
, unsignedp
, methods
);
1290 else if (res
!= imagr
)
1291 emit_move_insn (imagr
, res
);
1297 /* Don't fetch these from memory more than once. */
1298 real0
= force_reg (submode
, real0
);
1299 real1
= force_reg (submode
, real1
);
1301 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1302 realr
, unsignedp
, methods
);
1305 else if (res
!= realr
)
1306 emit_move_insn (realr
, res
);
1309 res
= expand_binop (submode
, binoptab
,
1310 real1
, imag0
, imagr
, unsignedp
, methods
);
1312 res
= expand_binop (submode
, binoptab
,
1313 real0
, imag1
, imagr
, unsignedp
, methods
);
1317 else if (res
!= imagr
)
1318 emit_move_insn (imagr
, res
);
1325 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1329 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1331 /* Don't fetch these from memory more than once. */
1332 real1
= force_reg (submode
, real1
);
1334 /* Simply divide the real and imaginary parts by `c' */
1335 if (class == MODE_COMPLEX_FLOAT
)
1336 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1337 realr
, unsignedp
, methods
);
1339 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1340 real0
, real1
, realr
, unsignedp
);
1344 else if (res
!= realr
)
1345 emit_move_insn (realr
, res
);
1347 if (class == MODE_COMPLEX_FLOAT
)
1348 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1349 imagr
, unsignedp
, methods
);
1351 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1352 imag0
, real1
, imagr
, unsignedp
);
1356 else if (res
!= imagr
)
1357 emit_move_insn (imagr
, res
);
1363 /* Divisor is of complex type:
1370 /* Don't fetch these from memory more than once. */
1371 real0
= force_reg (submode
, real0
);
1372 real1
= force_reg (submode
, real1
);
1375 imag0
= force_reg (submode
, imag0
);
1377 imag1
= force_reg (submode
, imag1
);
1379 /* Divisor: c*c + d*d */
1380 temp1
= expand_binop (submode
, smul_optab
, real1
, real1
,
1381 NULL_RTX
, unsignedp
, methods
);
1383 temp2
= expand_binop (submode
, smul_optab
, imag1
, imag1
,
1384 NULL_RTX
, unsignedp
, methods
);
1386 if (temp1
== 0 || temp2
== 0)
1389 divisor
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1390 NULL_RTX
, unsignedp
, methods
);
1396 /* ((a)(c-id))/divisor */
1397 /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
1399 /* Calculate the dividend */
1400 real_t
= expand_binop (submode
, smul_optab
, real0
, real1
,
1401 NULL_RTX
, unsignedp
, methods
);
1403 imag_t
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1404 NULL_RTX
, unsignedp
, methods
);
1406 if (real_t
== 0 || imag_t
== 0)
1409 imag_t
= expand_unop (submode
, neg_optab
, imag_t
,
1410 NULL_RTX
, unsignedp
);
1414 /* ((a+ib)(c-id))/divider */
1415 /* Calculate the dividend */
1416 temp1
= expand_binop (submode
, smul_optab
, real0
, real1
,
1417 NULL_RTX
, unsignedp
, methods
);
1419 temp2
= expand_binop (submode
, smul_optab
, imag0
, imag1
,
1420 NULL_RTX
, unsignedp
, methods
);
1422 if (temp1
== 0 || temp2
== 0)
1425 real_t
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1426 NULL_RTX
, unsignedp
, methods
);
1428 temp1
= expand_binop (submode
, smul_optab
, imag0
, real1
,
1429 NULL_RTX
, unsignedp
, methods
);
1431 temp2
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1432 NULL_RTX
, unsignedp
, methods
);
1434 if (temp1
== 0 || temp2
== 0)
1437 imag_t
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1438 NULL_RTX
, unsignedp
, methods
);
1440 if (real_t
== 0 || imag_t
== 0)
1444 if (class == MODE_COMPLEX_FLOAT
)
1445 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
1446 realr
, unsignedp
, methods
);
1448 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1449 real_t
, divisor
, realr
, unsignedp
);
1453 else if (res
!= realr
)
1454 emit_move_insn (realr
, res
);
1456 if (class == MODE_COMPLEX_FLOAT
)
1457 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
1458 imagr
, unsignedp
, methods
);
1460 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1461 imag_t
, divisor
, imagr
, unsignedp
);
1465 else if (res
!= imagr
)
1466 emit_move_insn (imagr
, res
);
1481 if (binoptab
->code
!= UNKNOWN
)
1483 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
1487 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1493 /* It can't be open-coded in this mode.
1494 Use a library call if one is available and caller says that's ok. */
1496 if (binoptab
->handlers
[(int) mode
].libfunc
1497 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1500 rtx funexp
= binoptab
->handlers
[(int) mode
].libfunc
;
1502 enum machine_mode op1_mode
= mode
;
1509 op1_mode
= word_mode
;
1510 /* Specify unsigned here,
1511 since negative shift counts are meaningless. */
1512 op1x
= convert_to_mode (word_mode
, op1
, 1);
1515 if (GET_MODE (op0
) != VOIDmode
1516 && GET_MODE (op0
) != mode
)
1517 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1519 /* Pass 1 for NO_QUEUE so we don't lose any increments
1520 if the libcall is cse'd or moved. */
1521 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1522 NULL_RTX
, 1, mode
, 2,
1523 op0
, mode
, op1x
, op1_mode
);
1525 insns
= get_insns ();
1528 target
= gen_reg_rtx (mode
);
1529 emit_libcall_block (insns
, target
, value
,
1530 gen_rtx (binoptab
->code
, mode
, op0
, op1
));
1535 delete_insns_since (last
);
1537 /* It can't be done in this mode. Can we do it in a wider mode? */
1539 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1540 || methods
== OPTAB_MUST_WIDEN
))
1542 /* Caller says, don't even try. */
1543 delete_insns_since (entry_last
);
1547 /* Compute the value of METHODS to pass to recursive calls.
1548 Don't allow widening to be tried recursively. */
1550 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1552 /* Look for a wider mode of the same class for which it appears we can do
1555 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1557 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1558 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1560 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1561 != CODE_FOR_nothing
)
1562 || (methods
== OPTAB_LIB
1563 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1565 rtx xop0
= op0
, xop1
= op1
;
1568 /* For certain integer operations, we need not actually extend
1569 the narrow operands, as long as we will truncate
1570 the results to the same narrowness. */
1572 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1573 || binoptab
== xor_optab
1574 || binoptab
== add_optab
|| binoptab
== sub_optab
1575 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1576 && class == MODE_INT
)
1579 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1580 unsignedp
, no_extend
);
1582 /* The second operand of a shift must always be extended. */
1583 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1584 no_extend
&& binoptab
!= ashl_optab
);
1586 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1587 unsignedp
, methods
);
1590 if (class != MODE_INT
)
1593 target
= gen_reg_rtx (mode
);
1594 convert_move (target
, temp
, 0);
1598 return gen_lowpart (mode
, temp
);
1601 delete_insns_since (last
);
1606 delete_insns_since (entry_last
);
1610 /* Expand a binary operator which has both signed and unsigned forms.
1611 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1614 If we widen unsigned operands, we may use a signed wider operation instead
1615 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): this copy is a lossy extraction — physical lines are split
   mid-statement and some original lines (opening brace, guarded returns,
   closing brace) are missing.  Code tokens are preserved exactly; only
   comments were added.
   Per the header comment above: expand a binop with both signed and
   unsigned forms.  Strategy visible below: (1) direct insn for the
   signedness-matching optab; (2) widen using a fake signed optab whose
   handlers for MODE are hidden; (3) for unsigned operands, widen
   unsigned; (4) same-width library call; (5) widened library call.  */
1618 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
1619 enum machine_mode mode
;
1620 optab uoptab
, soptab
;
1621 rtx op0
, op1
, target
;
1623 enum optab_methods methods
;
/* Pick the optab that matches the operands' signedness for the direct try.  */
1626 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1627 struct optab wide_soptab
;
1629 /* Do it without widening, if possible. */
1630 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1631 unsignedp
, OPTAB_DIRECT
);
1632 if (temp
|| methods
== OPTAB_DIRECT
)
1635 /* Try widening to a signed int. Make a fake signed optab that
1636 hides any signed insn for direct use. */
1637 wide_soptab
= *soptab
;
/* Blank out this mode's insn and libfunc so only wider modes are used.  */
1638 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1639 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1641 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1642 unsignedp
, OPTAB_WIDEN
);
1644 /* For unsigned operands, try widening to an unsigned int. */
1645 if (temp
== 0 && unsignedp
)
1646 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1647 unsignedp
, OPTAB_WIDEN
);
1648 if (temp
|| methods
== OPTAB_WIDEN
)
1651 /* Use the right width lib call if that exists. */
1652 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1653 if (temp
|| methods
== OPTAB_LIB
)
1656 /* Must widen and use a lib call, use either signed or unsigned. */
1657 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1658 unsignedp
, methods
);
/* Final fallback: the unsigned optab with the caller's METHODS.  */
1662 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1663 unsignedp
, methods
);
1667 /* Generate code to perform an operation specified by BINOPTAB
1668 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1669 We assume that the order of the operands for the instruction
1670 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1671 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1673 Either TARG0 or TARG1 may be zero, but what that means is that
1674 that result is not actually wanted. We will generate it into
1675 a dummy pseudo-reg and discard it. They may not both be zero.
1677 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: perform BINOPTAB on OP0/OP1 producing two
   results TARG0 and TARG1 (either may be zero => dummy pseudo); returns
   1 if the operation could be performed, 0 if not.  */
1680 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
/* The operation's mode is taken from whichever target was supplied.  */
1686 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1687 enum mode_class
class;
1688 enum machine_mode wider_mode
;
1689 rtx entry_last
= get_last_insn ();
1692 class = GET_MODE_CLASS (mode
);
1694 op0
= protect_from_queue (op0
, 0);
1695 op1
= protect_from_queue (op1
, 0);
1699 op0
= force_not_mem (op0
);
1700 op1
= force_not_mem (op1
);
1703 /* If we are inside an appropriately-short loop and one operand is an
1704 expensive constant, force it into a register. */
1705 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
1706 && rtx_cost (op0
, binoptab
->code
) > 2)
1707 op0
= force_reg (mode
, op0
);
1709 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
1710 && rtx_cost (op1
, binoptab
->code
) > 2)
1711 op1
= force_reg (mode
, op1
);
/* Missing targets get dummy pseudos (see function header comment).  */
1714 targ0
= protect_from_queue (targ0
, 1);
1716 targ0
= gen_reg_rtx (mode
);
1718 targ1
= protect_from_queue (targ1
, 1);
1720 targ1
= gen_reg_rtx (mode
);
1722 /* Record where to go back to if we fail. */
1723 last
= get_last_insn ();
/* Case 1: the target machine has an insn for this mode directly.  */
1725 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1727 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1728 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1729 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
1731 rtx xop0
= op0
, xop1
= op1
;
1733 /* In case this insn wants input operands in modes different from the
1734 result, convert the operands. */
1735 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
1736 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1738 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
1739 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
1741 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1742 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1743 xop0
= copy_to_mode_reg (mode0
, xop0
);
1745 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
))
1746 xop1
= copy_to_mode_reg (mode1
, xop1
);
1748 /* We could handle this, but we should always be called with a pseudo
1749 for our targets and all insns should take them as outputs. */
1750 if (! (*insn_operand_predicate
[icode
][0]) (targ0
, mode
)
1751 || ! (*insn_operand_predicate
[icode
][3]) (targ1
, mode
))
/* Operand order TARG0, OP0, OP1, TARG1 matches the pattern shape
   documented in the function header comment.  */
1754 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
/* Insn generation failed: roll back to the recorded point.  */
1761 delete_insns_since (last
);
1764 /* It can't be done in this mode. Can we do it in a wider mode? */
1766 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1768 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1769 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1771 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1772 != CODE_FOR_nothing
)
1774 register rtx t0
= gen_reg_rtx (wider_mode
);
1775 register rtx t1
= gen_reg_rtx (wider_mode
);
/* Recurse in the wider mode, then narrow both results back.  */
1777 if (expand_twoval_binop (binoptab
,
1778 convert_modes (wider_mode
, mode
, op0
,
1780 convert_modes (wider_mode
, mode
, op1
,
1784 convert_move (targ0
, t0
, unsignedp
);
1785 convert_move (targ1
, t1
, unsignedp
);
1789 delete_insns_since (last
);
/* Total failure: remove everything emitted since entry.  */
1794 delete_insns_since (entry_last
);
1798 /* Generate code to perform an operation specified by UNOPTAB
1799 on operand OP0, with result having machine-mode MODE.
1801 UNSIGNEDP is for the case where we have to widen the operands
1802 to perform the operation. It says to use zero-extension.
1804 If TARGET is nonzero, the value
1805 is generated there, if it is convenient to do so.
1806 In all cases an rtx is returned for the locus of the value;
1807 this may or may not be TARGET. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: perform UNOPTAB on OP0 with result mode
   MODE; UNSIGNEDP selects zero-extension when widening; returns an rtx
   for the result, which may or may not be TARGET.
   Strategy order visible below: direct insn; open-code in a wider mode;
   word-at-a-time one's complement; open-coded complex negation; library
   call; wider-mode retry including libfuncs; negate via subtract-from-
   zero fallback.  */
1810 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
1811 enum machine_mode mode
;
1817 enum mode_class
class;
1818 enum machine_mode wider_mode
;
1820 rtx last
= get_last_insn ();
1823 class = GET_MODE_CLASS (mode
);
1825 op0
= protect_from_queue (op0
, 0);
1829 op0
= force_not_mem (op0
);
1833 target
= protect_from_queue (target
, 1);
/* Attempt 1: a machine insn exists for this mode.  */
1835 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1837 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1838 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1844 temp
= gen_reg_rtx (mode
);
1846 if (GET_MODE (xop0
) != VOIDmode
1847 && GET_MODE (xop0
) != mode0
)
1848 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1850 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
1852 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1853 xop0
= copy_to_mode_reg (mode0
, xop0
);
1855 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
1856 temp
= gen_reg_rtx (mode
);
1858 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If a multi-insn sequence can't carry the REG_EQUAL note, retry
   without a target so the note isn't needed.  */
1861 if (GET_CODE (pat
) == SEQUENCE
1862 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
1864 delete_insns_since (last
);
1865 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
1873 delete_insns_since (last
);
1876 /* It can't be done in this mode. Can we open-code it in a wider mode? */
1878 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1879 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1880 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1882 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
1886 /* For certain operations, we need not actually extend
1887 the narrow operand, as long as we will truncate the
1888 results to the same narrowness. */
1890 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
1891 (unoptab
== neg_optab
1892 || unoptab
== one_cmpl_optab
)
1893 && class == MODE_INT
);
1895 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
/* Narrow the wider result: integers can use a lowpart subreg,
   other classes need a real conversion move.  */
1900 if (class != MODE_INT
)
1903 target
= gen_reg_rtx (mode
);
1904 convert_move (target
, temp
, 0);
1908 return gen_lowpart (mode
, temp
);
1911 delete_insns_since (last
);
1915 /* These can be done a word at a time. */
1916 if (unoptab
== one_cmpl_optab
1917 && class == MODE_INT
1918 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1919 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1924 if (target
== 0 || target
== op0
)
1925 target
= gen_reg_rtx (mode
);
1929 /* Do the actual arithmetic. */
1930 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1932 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1933 rtx x
= expand_unop (word_mode
, unoptab
,
1934 operand_subword_force (op0
, i
, mode
),
1935 target_piece
, unsignedp
);
1936 if (target_piece
!= x
)
1937 emit_move_insn (target_piece
, x
);
1940 insns
= get_insns ();
/* Wrap the per-word insns in a no-conflict block with a REG_EQUAL
   equivalent expression for the whole operation.  */
1943 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
1944 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1948 /* Open-code the complex negation operation. */
1949 else if (unoptab
== neg_optab
1950 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
1956 /* Find the correct mode for the real and imaginary parts */
1957 enum machine_mode submode
1958 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1959 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1962 if (submode
== BLKmode
)
1966 target
= gen_reg_rtx (mode
);
/* Negate imaginary and real parts independently into TARGET.  */
1970 target_piece
= gen_imagpart (submode
, target
);
1971 x
= expand_unop (submode
, unoptab
,
1972 gen_imagpart (submode
, op0
),
1973 target_piece
, unsignedp
);
1974 if (target_piece
!= x
)
1975 emit_move_insn (target_piece
, x
);
1977 target_piece
= gen_realpart (submode
, target
);
1978 x
= expand_unop (submode
, unoptab
,
1979 gen_realpart (submode
, op0
),
1980 target_piece
, unsignedp
);
1981 if (target_piece
!= x
)
1982 emit_move_insn (target_piece
, x
);
1987 emit_no_conflict_block (seq
, target
, op0
, 0,
1988 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1992 /* Now try a library call in this mode. */
1993 if (unoptab
->handlers
[(int) mode
].libfunc
)
1996 rtx funexp
= unoptab
->handlers
[(int) mode
].libfunc
;
2001 /* Pass 1 for NO_QUEUE so we don't lose any increments
2002 if the libcall is cse'd or moved. */
2003 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2004 NULL_RTX
, 1, mode
, 1, op0
, mode
);
2005 insns
= get_insns ();
2008 target
= gen_reg_rtx (mode
);
2009 emit_libcall_block (insns
, target
, value
,
2010 gen_rtx (unoptab
->code
, mode
, op0
));
2015 /* It can't be done in this mode. Can we do it in a wider mode? */
2017 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2019 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2020 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
/* Unlike the earlier widening loop, this one also accepts a wider-mode
   library function, not just an insn.  */
2022 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2023 != CODE_FOR_nothing
)
2024 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2028 /* For certain operations, we need not actually extend
2029 the narrow operand, as long as we will truncate the
2030 results to the same narrowness. */
2032 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2033 (unoptab
== neg_optab
2034 || unoptab
== one_cmpl_optab
)
2035 && class == MODE_INT
);
2037 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2042 if (class != MODE_INT
)
2045 target
= gen_reg_rtx (mode
);
2046 convert_move (target
, temp
, 0);
2050 return gen_lowpart (mode
, temp
);
2053 delete_insns_since (last
);
2058 /* If there is no negate operation, try doing a subtract from zero.
2059 The US Software GOFAST library needs this. */
2060 if (unoptab
== neg_optab
)
2063 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2064 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2072 /* Emit code to compute the absolute value of OP0, with result to
2073 TARGET if convenient. (TARGET may be 0.) The return value says
2074 where the result actually is to be found.
2076 MODE is the mode of the operand; the mode of the result is
2077 different but can be deduced from MODE.
2079 UNSIGNEDP is relevant if extension is needed. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: compute |OP0| with result to TARGET if
   convenient; returns where the result actually is.  SAFE presumably
   tells whether TARGET may be reused across the conditional-jump path —
   TODO confirm against the full source.  */
2082 expand_abs (mode
, op0
, target
, unsignedp
, safe
)
2083 enum machine_mode mode
;
2091 /* First try to do it with a special abs instruction. */
2092 temp
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2096 /* If this machine has expensive jumps, we can do integer absolute
2097 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2098 where W is the width of MODE. */
2100 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
/* Arithmetic right shift by W-1 yields 0 or all-ones (the sign mask).  */
2102 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2103 size_int (GET_MODE_BITSIZE (mode
) - 1),
2106 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2109 temp
= expand_binop (mode
, sub_optab
, temp
, extended
, target
, 0,
2116 /* If that does not win, use conditional jump and negate. */
2117 op1
= gen_label_rtx ();
/* TARGET must be a safe, mode-correct pseudo to hold the live value
   across the branch; otherwise grab a fresh register.  */
2118 if (target
== 0 || ! safe
2119 || GET_MODE (target
) != mode
2120 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2121 || (GET_CODE (target
) == REG
2122 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2123 target
= gen_reg_rtx (mode
);
2125 emit_move_insn (target
, op0
);
2128 /* If this mode is an integer too wide to compare properly,
2129 compare word by word. Rely on CSE to optimize constant cases. */
2130 if (GET_MODE_CLASS (mode
) == MODE_INT
&& ! can_compare_p (mode
))
2131 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2135 temp
= compare_from_rtx (target
, CONST0_RTX (mode
), GE
, 0, mode
,
/* compare_from_rtx folded to a constant: const1_rtx means the value
   is known non-negative, so no negate is needed.  */
2137 if (temp
== const1_rtx
)
2139 else if (temp
!= const0_rtx
)
2141 if (bcc_gen_fctn
[(int) GET_CODE (temp
)] != 0)
2142 emit_jump_insn ((*bcc_gen_fctn
[(int) GET_CODE (temp
)]) (op1
));
/* Fall-through path: the value was negative, so negate it in place.  */
2148 op0
= expand_unop (mode
, neg_optab
, target
, target
, 0);
2150 emit_move_insn (target
, op0
);
2156 /* Emit code to compute the absolute value of OP0, with result to
2157 TARGET if convenient. (TARGET may be 0.) The return value says
2158 where the result actually is to be found.
2160 MODE is the mode of the operand; the mode of the result is
2161 different but can be deduced from MODE.
2163 UNSIGNEDP is relevant for complex integer modes. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: compute |OP0| for a complex MODE; the
   result mode is the corresponding scalar SUBMODE.  Strategy order
   visible below: direct abs insn; open-code in a wider mode;
   sqrt(re^2 + im^2) when sqrt is open-codable; library call; wider-mode
   retry including libfuncs.  */
2166 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2167 enum machine_mode mode
;
2172 enum mode_class
class = GET_MODE_CLASS (mode
);
2173 enum machine_mode wider_mode
;
2175 rtx entry_last
= get_last_insn ();
2179 /* Find the correct mode for the real and imaginary parts. */
2180 enum machine_mode submode
2181 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2182 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2185 if (submode
== BLKmode
)
2188 op0
= protect_from_queue (op0
, 0);
2192 op0
= force_not_mem (op0
);
2195 last
= get_last_insn ();
2198 target
= protect_from_queue (target
, 1);
/* Attempt 1: the machine has an abs insn for the complex mode itself.
   Note the result register is in SUBMODE (scalar), not MODE.  */
2200 if (abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2202 int icode
= (int) abs_optab
->handlers
[(int) mode
].insn_code
;
2203 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
2209 temp
= gen_reg_rtx (submode
);
2211 if (GET_MODE (xop0
) != VOIDmode
2212 && GET_MODE (xop0
) != mode0
)
2213 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2215 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2217 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
2218 xop0
= copy_to_mode_reg (mode0
, xop0
);
2220 if (! (*insn_operand_predicate
[icode
][0]) (temp
, submode
))
2221 temp
= gen_reg_rtx (submode
);
2223 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If a multi-insn sequence can't carry the REG_EQUAL note, retry via
   expand_unop without a target.  */
2226 if (GET_CODE (pat
) == SEQUENCE
2227 && ! add_equal_note (pat
, temp
, abs_optab
->code
, xop0
, NULL_RTX
))
2229 delete_insns_since (last
);
2230 return expand_unop (mode
, abs_optab
, op0
, NULL_RTX
, unsignedp
);
2238 delete_insns_since (last
);
2241 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2243 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2244 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2246 if (abs_optab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2250 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2251 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2255 if (class != MODE_COMPLEX_INT
)
2258 target
= gen_reg_rtx (submode
);
2259 convert_move (target
, temp
, 0);
2263 return gen_lowpart (submode
, temp
);
2266 delete_insns_since (last
);
2270 /* Open-code the complex absolute-value operation
2271 if we can open-code sqrt. Otherwise it's not worth while. */
2272 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
)
2274 rtx real
, imag
, total
;
2276 real
= gen_realpart (submode
, op0
);
2277 imag
= gen_imagpart (submode
, op0
);
2279 /* Square both parts. */
2280 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2281 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2283 /* Sum the parts. */
2284 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2285 0, OPTAB_LIB_WIDEN
);
2287 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2288 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2290 delete_insns_since (last
);
2295 /* Now try a library call in this mode. */
2296 if (abs_optab
->handlers
[(int) mode
].libfunc
)
2299 rtx funexp
= abs_optab
->handlers
[(int) mode
].libfunc
;
2304 /* Pass 1 for NO_QUEUE so we don't lose any increments
2305 if the libcall is cse'd or moved. */
2306 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2307 NULL_RTX
, 1, submode
, 1, op0
, mode
);
2308 insns
= get_insns ();
2311 target
= gen_reg_rtx (submode
);
2312 emit_libcall_block (insns
, target
, value
,
2313 gen_rtx (abs_optab
->code
, mode
, op0
));
2318 /* It can't be done in this mode. Can we do it in a wider mode? */
2320 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2321 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
/* This retry also accepts a wider-mode library function.  */
2323 if ((abs_optab
->handlers
[(int) wider_mode
].insn_code
2324 != CODE_FOR_nothing
)
2325 || abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2329 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2331 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2335 if (class != MODE_COMPLEX_INT
)
2338 target
= gen_reg_rtx (submode
);
2339 convert_move (target
, temp
, 0);
2343 return gen_lowpart (submode
, temp
);
2346 delete_insns_since (last
);
2350 delete_insns_since (entry_last
);
2354 /* Generate an instruction whose insn-code is INSN_CODE,
2355 with two operands: an output TARGET and an input OP0.
2356 TARGET *must* be nonzero, and the output is always stored there.
2357 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2358 the value that is stored into TARGET. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: emit the insn ICODE with output TARGET
   and input OP0; TARGET must be nonzero and always receives the result;
   CODE describes (CODE OP0) for the equal-note, or UNKNOWN for none.  */
2361 emit_unop_insn (icode
, target
, op0
, code
)
2368 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
/* TEMP starts as TARGET; it is replaced by a pseudo below if TARGET
   doesn't satisfy the insn's output predicate.  */
2371 temp
= target
= protect_from_queue (target
, 1);
2373 op0
= protect_from_queue (op0
, 0);
2376 op0
= force_not_mem (op0
);
2378 /* Now, if insn does not accept our operands, put them into pseudos. */
2380 if (! (*insn_operand_predicate
[icode
][1]) (op0
, mode0
))
2381 op0
= copy_to_mode_reg (mode0
, op0
);
2383 if (! (*insn_operand_predicate
[icode
][0]) (temp
, GET_MODE (temp
))
2384 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2385 temp
= gen_reg_rtx (GET_MODE (temp
));
2387 pat
= GEN_FCN (icode
) (temp
, op0
);
2389 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2390 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
/* If the result landed in a substitute pseudo, copy it into TARGET.  */
2395 emit_move_insn (target
, temp
);
2398 /* Emit code to perform a series of operations on a multi-word quantity, one
2401 Such a block is preceded by a CLOBBER of the output, consists of multiple
2402 insns, each setting one word of the output, and followed by a SET copying
2403 the output to itself.
2405 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2406 note indicating that it doesn't conflict with the (also multi-word)
2407 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2410 INSNS is a block of code generated to perform the operation, not including
2411 the CLOBBER and final copy. All insns that compute intermediate values
2412 are first emitted, followed by the block as described above.
2414 TARGET, OP0, and OP1 are the output and inputs of the operations,
2415 respectively. OP1 may be zero for a unary operation.
2417 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2420 If TARGET is not a register, INSNS is simply emitted with no special
2421 processing. Likewise if anything in INSNS is not an INSN or if
2422 there is a libcall block inside INSNS.
2424 The final insn emitted is returned. */
/* NOTE(review): lossy extraction — lines are split mid-statement and some
   original lines are missing from this copy.  Tokens preserved exactly;
   only comments added.
   Per the header comment above: emit INSNS as a multi-word no-conflict
   block — a CLOBBER of TARGET, the word-setting insns each tagged with
   REG_NO_CONFLICT notes for OP0/OP1, and a final TARGET-to-TARGET copy
   carrying a REG_EQUAL note of EQUIV; the whole block is bracketed by
   REG_LIBCALL/REG_RETVAL notes.  */
2427 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2433 rtx prev
, next
, first
, last
, insn
;
/* Bail out to a plain emit when TARGET isn't a register or during
   reload (per the header: no special processing then).  */
2435 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2436 return emit_insns (insns
);
/* Likewise bail out if INSNS contains a non-INSN or a nested libcall
   block.  */
2438 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2439 if (GET_CODE (insn
) != INSN
2440 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2441 return emit_insns (insns
);
2443 /* First emit all insns that do not store into words of the output and remove
2444 these from the list. */
2445 for (insn
= insns
; insn
; insn
= next
)
2450 next
= NEXT_INSN (insn
);
2452 if (GET_CODE (PATTERN (insn
)) == SET
)
2453 set
= PATTERN (insn
);
2454 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2456 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2457 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2459 set
= XVECEXP (PATTERN (insn
), 0, i
);
/* Insns whose destination doesn't overlap TARGET are unlinked from
   the list (they get emitted ahead of the block).  */
2467 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2469 if (PREV_INSN (insn
))
2470 NEXT_INSN (PREV_INSN (insn
)) = next
;
2475 PREV_INSN (next
) = PREV_INSN (insn
);
2481 prev
= get_last_insn ();
2483 /* Now write the CLOBBER of the output, followed by the setting of each
2484 of the words, followed by the final copy. */
2485 if (target
!= op0
&& target
!= op1
)
2486 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
2488 for (insn
= insns
; insn
; insn
= next
)
2490 next
= NEXT_INSN (insn
);
/* Tag each word-setting insn with REG_NO_CONFLICT notes for the
   register inputs, per the function header comment.  */
2493 if (op1
&& GET_CODE (op1
) == REG
)
2494 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op1
,
2497 if (op0
&& GET_CODE (op0
) == REG
)
2498 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op0
,
/* The final self-copy requires a move insn for TARGET's mode.  */
2502 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2503 != CODE_FOR_nothing
)
2505 last
= emit_move_insn (target
, target
);
2508 = gen_rtx (EXPR_LIST
, REG_EQUAL
, equiv
, REG_NOTES (last
));
2511 last
= get_last_insn ();
2514 first
= get_insns ();
2516 first
= NEXT_INSN (prev
);
2518 /* Encapsulate the block so it gets manipulated as a unit. */
2519 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2521 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2526 /* Emit code to make a call to a constant function or a library call.
2528 INSNS is a list containing all insns emitted in the call.
2529 These insns leave the result in RESULT. Our block is to copy RESULT
2530 to TARGET, which is logically equivalent to EQUIV.
2532 We first emit any insns that set a pseudo on the assumption that these are
2533 loading constants into registers; doing so allows them to be safely cse'ed
2534 between blocks. Then we emit all the other insns in the block, followed by
2535 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2536 note with an operand of EQUIV.
2538 Moving assignments to pseudos outside of the block is done to improve
2539 the generated code, but is not required to generate correct code,
2540 hence being unable to move an assignment is not grounds for not making
2541 a libcall block. There are two reasons why it is safe to leave these
2542 insns inside the block: First, we know that these pseudos cannot be
2543 used in generated RTL outside the block since they are created for
2544 temporary purposes within the block. Second, CSE will not record the
2545 values of anything set inside a libcall block, so we know they must
2546 be dead at the end of the block.
2548 Except for the first group of insns (the ones setting pseudos), the
2549 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2552 emit_libcall_block (insns
, target
, result
, equiv
)
2558 rtx prev
, next
, first
, last
, insn
;
2560 /* First emit all insns that set pseudos. Remove them from the list as
2561 we go. Avoid insns that set pseudos which were referenced in previous
2562 insns. These can be generated by move_by_pieces, for example,
2563 to update an address. Similarly, avoid insns that reference things
2564 set in previous insns. */
2566 for (insn
= insns
; insn
; insn
= next
)
2568 rtx set
= single_set (insn
);
2570 next
= NEXT_INSN (insn
);
2572 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2573 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2575 || (! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
))
2576 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2577 && ! modified_in_p (SET_SRC (set
), insns
)
2578 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2580 if (PREV_INSN (insn
))
2581 NEXT_INSN (PREV_INSN (insn
)) = next
;
2586 PREV_INSN (next
) = PREV_INSN (insn
);
2592 prev
= get_last_insn ();
2594 /* Write the remaining insns followed by the final copy. */
2596 for (insn
= insns
; insn
; insn
= next
)
2598 next
= NEXT_INSN (insn
);
2603 last
= emit_move_insn (target
, result
);
2604 REG_NOTES (last
) = gen_rtx (EXPR_LIST
,
2605 REG_EQUAL
, copy_rtx (equiv
), REG_NOTES (last
));
2608 first
= get_insns ();
2610 first
= NEXT_INSN (prev
);
2612 /* Encapsulate the block so it gets manipulated as a unit. */
2613 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2615 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2618 /* Generate code to store zero in X. */
2624 emit_move_insn (x
, const0_rtx
);
2627 /* Generate code to store 1 in X
2628 assuming it contains zero beforehand. */
2631 emit_0_to_1_insn (x
)
2634 emit_move_insn (x
, const1_rtx
);
2637 /* Generate code to compare X with Y
2638 so that the condition codes are set.
2640 MODE is the mode of the inputs (in case they are const_int).
2641 UNSIGNEDP nonzero says that X and Y are unsigned;
2642 this matters if they need to be widened.
2644 If they have mode BLKmode, then SIZE specifies the size of both X and Y,
2645 and ALIGN specifies the known shared alignment of X and Y.
2647 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
2648 It is ignored for fixed-point and block comparisons;
2649 it is used only for floating-point comparisons. */
2652 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
, align
)
2654 enum rtx_code comparison
;
2656 enum machine_mode mode
;
2660 enum mode_class
class;
2661 enum machine_mode wider_mode
;
2663 class = GET_MODE_CLASS (mode
);
2665 /* They could both be VOIDmode if both args are immediate constants,
2666 but we should fold that at an earlier stage.
2667 With no special code here, this will call abort,
2668 reminding the programmer to implement such folding. */
2670 if (mode
!= BLKmode
&& flag_force_mem
)
2672 x
= force_not_mem (x
);
2673 y
= force_not_mem (y
);
2676 /* If we are inside an appropriately-short loop and one operand is an
2677 expensive constant, force it into a register. */
2678 if (CONSTANT_P (x
) && preserve_subexpressions_p () && rtx_cost (x
, COMPARE
) > 2)
2679 x
= force_reg (mode
, x
);
2681 if (CONSTANT_P (y
) && preserve_subexpressions_p () && rtx_cost (y
, COMPARE
) > 2)
2682 y
= force_reg (mode
, y
);
2684 /* Don't let both operands fail to indicate the mode. */
2685 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
2686 x
= force_reg (mode
, x
);
2688 /* Handle all BLKmode compares. */
2690 if (mode
== BLKmode
)
2693 x
= protect_from_queue (x
, 0);
2694 y
= protect_from_queue (y
, 0);
2698 #ifdef HAVE_cmpstrqi
2700 && GET_CODE (size
) == CONST_INT
2701 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
2703 enum machine_mode result_mode
2704 = insn_operand_mode
[(int) CODE_FOR_cmpstrqi
][0];
2705 rtx result
= gen_reg_rtx (result_mode
);
2706 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, GEN_INT (align
)));
2707 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2712 #ifdef HAVE_cmpstrhi
2714 && GET_CODE (size
) == CONST_INT
2715 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
2717 enum machine_mode result_mode
2718 = insn_operand_mode
[(int) CODE_FOR_cmpstrhi
][0];
2719 rtx result
= gen_reg_rtx (result_mode
);
2720 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, GEN_INT (align
)));
2721 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2726 #ifdef HAVE_cmpstrsi
2729 enum machine_mode result_mode
2730 = insn_operand_mode
[(int) CODE_FOR_cmpstrsi
][0];
2731 rtx result
= gen_reg_rtx (result_mode
);
2732 size
= protect_from_queue (size
, 0);
2733 emit_insn (gen_cmpstrsi (result
, x
, y
,
2734 convert_to_mode (SImode
, size
, 1),
2736 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2742 #ifdef TARGET_MEM_FUNCTIONS
2743 emit_library_call (memcmp_libfunc
, 0,
2744 TYPE_MODE (integer_type_node
), 3,
2745 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2748 emit_library_call (bcmp_libfunc
, 0,
2749 TYPE_MODE (integer_type_node
), 3,
2750 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2753 emit_cmp_insn (hard_libcall_value (TYPE_MODE (integer_type_node
)),
2754 const0_rtx
, comparison
, NULL_RTX
,
2755 TYPE_MODE (integer_type_node
), 0, 0);
2760 /* Handle some compares against zero. */
2762 if (y
== CONST0_RTX (mode
)
2763 && tst_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2765 int icode
= (int) tst_optab
->handlers
[(int) mode
].insn_code
;
2768 x
= protect_from_queue (x
, 0);
2769 y
= protect_from_queue (y
, 0);
2771 /* Now, if insn does accept these operands, put them into pseudos. */
2772 if (! (*insn_operand_predicate
[icode
][0])
2773 (x
, insn_operand_mode
[icode
][0]))
2774 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2776 emit_insn (GEN_FCN (icode
) (x
));
2780 /* Handle compares for which there is a directly suitable insn. */
2782 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2784 int icode
= (int) cmp_optab
->handlers
[(int) mode
].insn_code
;
2787 x
= protect_from_queue (x
, 0);
2788 y
= protect_from_queue (y
, 0);
2790 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2791 if (! (*insn_operand_predicate
[icode
][0])
2792 (x
, insn_operand_mode
[icode
][0]))
2793 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2795 if (! (*insn_operand_predicate
[icode
][1])
2796 (y
, insn_operand_mode
[icode
][1]))
2797 y
= copy_to_mode_reg (insn_operand_mode
[icode
][1], y
);
2799 emit_insn (GEN_FCN (icode
) (x
, y
));
2803 /* Try widening if we can find a direct insn that way. */
2805 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2807 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2808 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2810 if (cmp_optab
->handlers
[(int) wider_mode
].insn_code
2811 != CODE_FOR_nothing
)
2813 x
= protect_from_queue (x
, 0);
2814 y
= protect_from_queue (y
, 0);
2815 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
2816 y
= convert_modes (wider_mode
, mode
, y
, unsignedp
);
2817 emit_cmp_insn (x
, y
, comparison
, NULL_RTX
,
2818 wider_mode
, unsignedp
, align
);
2824 /* Handle a lib call just for the mode we are using. */
2826 if (cmp_optab
->handlers
[(int) mode
].libfunc
2827 && class != MODE_FLOAT
)
2829 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
2830 /* If we want unsigned, and this mode has a distinct unsigned
2831 comparison routine, use that. */
2832 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
2833 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
2835 emit_library_call (libfunc
, 1,
2836 word_mode
, 2, x
, mode
, y
, mode
);
2838 /* Integer comparison returns a result that must be compared against 1,
2839 so that even if we do an unsigned compare afterward,
2840 there is still a value that can represent the result "less than". */
2842 emit_cmp_insn (hard_libcall_value (word_mode
), const1_rtx
,
2843 comparison
, NULL_RTX
, word_mode
, unsignedp
, 0);
2847 if (class == MODE_FLOAT
)
2848 emit_float_lib_cmp (x
, y
, comparison
);
2854 /* Nonzero if a compare of mode MODE can be done straightforwardly
2855 (without splitting it into pieces). */
2858 can_compare_p (mode
)
2859 enum machine_mode mode
;
2863 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
2865 mode
= GET_MODE_WIDER_MODE (mode
);
2866 } while (mode
!= VOIDmode
);
2871 /* Emit a library call comparison between floating point X and Y.
2872 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
2875 emit_float_lib_cmp (x
, y
, comparison
)
2877 enum rtx_code comparison
;
2879 enum machine_mode mode
= GET_MODE (x
);
2886 libfunc
= eqhf2_libfunc
;
2890 libfunc
= nehf2_libfunc
;
2894 libfunc
= gthf2_libfunc
;
2898 libfunc
= gehf2_libfunc
;
2902 libfunc
= lthf2_libfunc
;
2906 libfunc
= lehf2_libfunc
;
2909 else if (mode
== SFmode
)
2913 libfunc
= eqsf2_libfunc
;
2917 libfunc
= nesf2_libfunc
;
2921 libfunc
= gtsf2_libfunc
;
2925 libfunc
= gesf2_libfunc
;
2929 libfunc
= ltsf2_libfunc
;
2933 libfunc
= lesf2_libfunc
;
2936 else if (mode
== DFmode
)
2940 libfunc
= eqdf2_libfunc
;
2944 libfunc
= nedf2_libfunc
;
2948 libfunc
= gtdf2_libfunc
;
2952 libfunc
= gedf2_libfunc
;
2956 libfunc
= ltdf2_libfunc
;
2960 libfunc
= ledf2_libfunc
;
2963 else if (mode
== XFmode
)
2967 libfunc
= eqxf2_libfunc
;
2971 libfunc
= nexf2_libfunc
;
2975 libfunc
= gtxf2_libfunc
;
2979 libfunc
= gexf2_libfunc
;
2983 libfunc
= ltxf2_libfunc
;
2987 libfunc
= lexf2_libfunc
;
2990 else if (mode
== TFmode
)
2994 libfunc
= eqtf2_libfunc
;
2998 libfunc
= netf2_libfunc
;
3002 libfunc
= gttf2_libfunc
;
3006 libfunc
= getf2_libfunc
;
3010 libfunc
= lttf2_libfunc
;
3014 libfunc
= letf2_libfunc
;
3019 enum machine_mode wider_mode
;
3021 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3022 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3024 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3025 != CODE_FOR_nothing
)
3026 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3028 x
= protect_from_queue (x
, 0);
3029 y
= protect_from_queue (y
, 0);
3030 x
= convert_to_mode (wider_mode
, x
, 0);
3031 y
= convert_to_mode (wider_mode
, y
, 0);
3032 emit_float_lib_cmp (x
, y
, comparison
);
3042 emit_library_call (libfunc
, 1,
3043 word_mode
, 2, x
, mode
, y
, mode
);
3045 emit_cmp_insn (hard_libcall_value (word_mode
), const0_rtx
, comparison
,
3046 NULL_RTX
, word_mode
, 0, 0);
3049 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3052 emit_indirect_jump (loc
)
3055 if (! ((*insn_operand_predicate
[(int)CODE_FOR_indirect_jump
][0])
3057 loc
= copy_to_mode_reg (Pmode
, loc
);
3059 emit_jump_insn (gen_indirect_jump (loc
));
3063 #ifdef HAVE_conditional_move
3065 /* Emit a conditional move instruction if the machine supports one for that
3066 condition and machine mode.
3068 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3069 the mode to use should they be constants. If it is VOIDmode, they cannot
3072 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3073 should be stored there. MODE is the mode to use should they be constants.
3074 If it is VOIDmode, they cannot both be constants.
3076 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3077 is not supported. */
3080 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3085 enum machine_mode cmode
;
3087 enum machine_mode mode
;
3090 rtx tem
, subtarget
, comparison
, insn
;
3091 enum insn_code icode
;
3093 /* If one operand is constant, make it the second one. Only do this
3094 if the other operand is not constant as well. */
3096 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
3097 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
3102 code
= swap_condition (code
);
3105 if (cmode
== VOIDmode
)
3106 cmode
= GET_MODE (op0
);
3108 if ((CONSTANT_P (op2
) && ! CONSTANT_P (op3
))
3109 || (GET_CODE (op2
) == CONST_INT
&& GET_CODE (op3
) != CONST_INT
))
3114 /* ??? This may not be appropriate (consider IEEE). Perhaps we should
3115 call can_reverse_comparison_p here and bail out if necessary.
3116 It's not clear whether we need to do this canonicalization though. */
3117 code
= reverse_condition (code
);
3120 if (mode
== VOIDmode
)
3121 mode
= GET_MODE (op2
);
3123 icode
= movcc_gen_code
[mode
];
3125 if (icode
== CODE_FOR_nothing
)
3130 op2
= force_not_mem (op2
);
3131 op3
= force_not_mem (op3
);
3135 target
= protect_from_queue (target
, 1);
3137 target
= gen_reg_rtx (mode
);
3143 op2
= protect_from_queue (op2
, 0);
3144 op3
= protect_from_queue (op3
, 0);
3146 /* If the insn doesn't accept these operands, put them in pseudos. */
3148 if (! (*insn_operand_predicate
[icode
][0])
3149 (subtarget
, insn_operand_mode
[icode
][0]))
3150 subtarget
= gen_reg_rtx (insn_operand_mode
[icode
][0]);
3152 if (! (*insn_operand_predicate
[icode
][2])
3153 (op2
, insn_operand_mode
[icode
][2]))
3154 op2
= copy_to_mode_reg (insn_operand_mode
[icode
][2], op2
);
3156 if (! (*insn_operand_predicate
[icode
][3])
3157 (op3
, insn_operand_mode
[icode
][3]))
3158 op3
= copy_to_mode_reg (insn_operand_mode
[icode
][3], op3
);
3160 /* Everything should now be in the suitable form, so emit the compare insn
3161 and then the conditional move. */
3164 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
, 0);
3166 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3167 if (GET_CODE (comparison
) != code
)
3168 /* This shouldn't happen. */
3171 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3173 /* If that failed, then give up. */
3179 if (subtarget
!= target
)
3180 convert_move (target
, subtarget
, 0);
3185 /* Return non-zero if a conditional move of mode MODE is supported.
3187 This function is for combine so it can tell whether an insn that looks
3188 like a conditional move is actually supported by the hardware. If we
3189 guess wrong we lose a bit on optimization, but that's it. */
3190 /* ??? sparc64 supports conditionally moving integers values based on fp
3191 comparisons, and vice versa. How do we handle them? */
3194 can_conditionally_move_p (mode
)
3195 enum machine_mode mode
;
3197 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3203 #endif /* HAVE_conditional_move */
3205 /* These three functions generate an insn body and return it
3206 rather than emitting the insn.
3208 They do not protect from queued increments,
3209 because they may be used 1) in protect_from_queue itself
3210 and 2) in other passes where there is no queue. */
3212 /* Generate and return an insn body to add Y to X. */
3215 gen_add2_insn (x
, y
)
3218 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3220 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3221 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3222 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3225 return (GEN_FCN (icode
) (x
, x
, y
));
3229 have_add2_insn (mode
)
3230 enum machine_mode mode
;
3232 return add_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3235 /* Generate and return an insn body to subtract Y from X. */
3238 gen_sub2_insn (x
, y
)
3241 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3243 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3244 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3245 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3248 return (GEN_FCN (icode
) (x
, x
, y
));
3252 have_sub2_insn (mode
)
3253 enum machine_mode mode
;
3255 return sub_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3258 /* Generate the body of an instruction to copy Y into X.
3259 It may be a SEQUENCE, if one insn isn't enough. */
3262 gen_move_insn (x
, y
)
3265 register enum machine_mode mode
= GET_MODE (x
);
3266 enum insn_code insn_code
;
3269 if (mode
== VOIDmode
)
3270 mode
= GET_MODE (y
);
3272 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3274 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3275 find a mode to do it in. If we have a movcc, use it. Otherwise,
3276 find the MODE_INT mode of the same width. */
3278 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3280 enum machine_mode tmode
= VOIDmode
;
3284 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3287 for (tmode
= QImode
; tmode
!= VOIDmode
;
3288 tmode
= GET_MODE_WIDER_MODE (tmode
))
3289 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3292 if (tmode
== VOIDmode
)
3295 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3296 may call change_address which is not appropriate if we were
3297 called when a reload was in progress. We don't have to worry
3298 about changing the address since the size in bytes is supposed to
3299 be the same. Copy the MEM to change the mode and move any
3300 substitutions from the old MEM to the new one. */
3302 if (reload_in_progress
)
3304 x
= gen_lowpart_common (tmode
, x1
);
3305 if (x
== 0 && GET_CODE (x1
) == MEM
)
3307 x
= gen_rtx (MEM
, tmode
, XEXP (x1
, 0));
3308 RTX_UNCHANGING_P (x
) = RTX_UNCHANGING_P (x1
);
3309 MEM_IN_STRUCT_P (x
) = MEM_IN_STRUCT_P (x1
);
3310 MEM_VOLATILE_P (x
) = MEM_VOLATILE_P (x1
);
3311 copy_replacements (x1
, x
);
3314 y
= gen_lowpart_common (tmode
, y1
);
3315 if (y
== 0 && GET_CODE (y1
) == MEM
)
3317 y
= gen_rtx (MEM
, tmode
, XEXP (y1
, 0));
3318 RTX_UNCHANGING_P (y
) = RTX_UNCHANGING_P (y1
);
3319 MEM_IN_STRUCT_P (y
) = MEM_IN_STRUCT_P (y1
);
3320 MEM_VOLATILE_P (y
) = MEM_VOLATILE_P (y1
);
3321 copy_replacements (y1
, y
);
3326 x
= gen_lowpart (tmode
, x
);
3327 y
= gen_lowpart (tmode
, y
);
3330 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3331 return (GEN_FCN (insn_code
) (x
, y
));
3335 emit_move_insn_1 (x
, y
);
3336 seq
= gen_sequence ();
3341 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3342 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3343 no such operation exists, CODE_FOR_nothing will be returned. */
3346 can_extend_p (to_mode
, from_mode
, unsignedp
)
3347 enum machine_mode to_mode
, from_mode
;
3350 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
];
3353 /* Generate the body of an insn to extend Y (with mode MFROM)
3354 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
3357 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
3359 enum machine_mode mto
, mfrom
;
3362 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
]) (x
, y
));
3365 /* can_fix_p and can_float_p say whether the target machine
3366 can directly convert a given fixed point type to
3367 a given floating point type, or vice versa.
3368 The returned value is the CODE_FOR_... value to use,
3369 or CODE_FOR_nothing if these modes cannot be directly converted.
3371 *TRUNCP_PTR is set to 1 if it is necessary to output
3372 an explicit FTRUNC insn before the fix insn; otherwise 0. */
3374 static enum insn_code
3375 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
3376 enum machine_mode fltmode
, fixmode
;
3381 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
] != CODE_FOR_nothing
)
3382 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3384 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
3387 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3389 return CODE_FOR_nothing
;
3392 static enum insn_code
3393 can_float_p (fltmode
, fixmode
, unsignedp
)
3394 enum machine_mode fixmode
, fltmode
;
3397 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3400 /* Generate code to convert FROM to floating point
3401 and store in TO. FROM must be fixed point and not VOIDmode.
3402 UNSIGNEDP nonzero means regard FROM as unsigned.
3403 Normally this is done by correcting the final value
3404 if it is negative. */
3407 expand_float (to
, from
, unsignedp
)
3411 enum insn_code icode
;
3412 register rtx target
= to
;
3413 enum machine_mode fmode
, imode
;
3415 /* Crash now, because we won't be able to decide which mode to use. */
3416 if (GET_MODE (from
) == VOIDmode
)
3419 /* Look for an insn to do the conversion. Do it in the specified
3420 modes if possible; otherwise convert either input, output or both to
3421 wider mode. If the integer mode is wider than the mode of FROM,
3422 we can do the conversion signed even if the input is unsigned. */
3424 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
3425 imode
= GET_MODE_WIDER_MODE (imode
))
3426 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3427 fmode
= GET_MODE_WIDER_MODE (fmode
))
3429 int doing_unsigned
= unsignedp
;
3431 icode
= can_float_p (fmode
, imode
, unsignedp
);
3432 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
3433 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
3435 if (icode
!= CODE_FOR_nothing
)
3437 to
= protect_from_queue (to
, 1);
3438 from
= protect_from_queue (from
, 0);
3440 if (imode
!= GET_MODE (from
))
3441 from
= convert_to_mode (imode
, from
, unsignedp
);
3443 if (fmode
!= GET_MODE (to
))
3444 target
= gen_reg_rtx (fmode
);
3446 emit_unop_insn (icode
, target
, from
,
3447 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
3450 convert_move (to
, target
, 0);
3455 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3457 /* Unsigned integer, and no way to convert directly.
3458 Convert as signed, then conditionally adjust the result. */
3461 rtx label
= gen_label_rtx ();
3463 REAL_VALUE_TYPE offset
;
3467 to
= protect_from_queue (to
, 1);
3468 from
= protect_from_queue (from
, 0);
3471 from
= force_not_mem (from
);
3473 /* Look for a usable floating mode FMODE wider than the source and at
3474 least as wide as the target. Using FMODE will avoid rounding woes
3475 with unsigned values greater than the signed maximum value. */
3477 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3478 fmode
= GET_MODE_WIDER_MODE (fmode
))
3479 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
3480 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
3483 if (fmode
== VOIDmode
)
3485 /* There is no such mode. Pretend the target is wide enough. */
3486 fmode
= GET_MODE (to
);
3488 /* Avoid double-rounding when TO is narrower than FROM. */
3489 if ((significand_size (fmode
) + 1)
3490 < GET_MODE_BITSIZE (GET_MODE (from
)))
3493 rtx neglabel
= gen_label_rtx ();
3495 /* Don't use TARGET if it isn't a register, is a hard register,
3496 or is the wrong mode. */
3497 if (GET_CODE (target
) != REG
3498 || REGNO (target
) < FIRST_PSEUDO_REGISTER
3499 || GET_MODE (target
) != fmode
)
3500 target
= gen_reg_rtx (fmode
);
3502 imode
= GET_MODE (from
);
3503 do_pending_stack_adjust ();
3505 /* Test whether the sign bit is set. */
3506 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, imode
, 0, 0);
3507 emit_jump_insn (gen_blt (neglabel
));
3509 /* The sign bit is not set. Convert as signed. */
3510 expand_float (target
, from
, 0);
3511 emit_jump_insn (gen_jump (label
));
3513 /* The sign bit is set.
3514 Convert to a usable (positive signed) value by shifting right
3515 one bit, while remembering if a nonzero bit was shifted
3516 out; i.e., compute (from & 1) | (from >> 1). */
3518 emit_label (neglabel
);
3519 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
3520 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3521 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
3523 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
3525 expand_float (target
, temp
, 0);
3527 /* Multiply by 2 to undo the shift above. */
3528 temp
= expand_binop (fmode
, add_optab
, target
, target
,
3529 target
, 0, OPTAB_LIB_WIDEN
);
3531 emit_move_insn (target
, temp
);
3533 do_pending_stack_adjust ();
3539 /* If we are about to do some arithmetic to correct for an
3540 unsigned operand, do it in a pseudo-register. */
3542 if (GET_MODE (to
) != fmode
3543 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
3544 target
= gen_reg_rtx (fmode
);
3546 /* Convert as signed integer to floating. */
3547 expand_float (target
, from
, 0);
3549 /* If FROM is negative (and therefore TO is negative),
3550 correct its value by 2**bitwidth. */
3552 do_pending_stack_adjust ();
3553 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3554 emit_jump_insn (gen_bge (label
));
3556 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
3557 Rather than setting up a dconst_dot_5, let's hope SCO
3559 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
3560 temp
= expand_binop (fmode
, add_optab
, target
,
3561 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
3562 target
, 0, OPTAB_LIB_WIDEN
);
3564 emit_move_insn (target
, temp
);
3566 do_pending_stack_adjust ();
3572 /* No hardware instruction available; call a library routine to convert from
3573 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
3579 to
= protect_from_queue (to
, 1);
3580 from
= protect_from_queue (from
, 0);
3582 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
3583 from
= convert_to_mode (SImode
, from
, unsignedp
);
3586 from
= force_not_mem (from
);
3588 if (GET_MODE (to
) == SFmode
)
3590 if (GET_MODE (from
) == SImode
)
3591 libfcn
= floatsisf_libfunc
;
3592 else if (GET_MODE (from
) == DImode
)
3593 libfcn
= floatdisf_libfunc
;
3594 else if (GET_MODE (from
) == TImode
)
3595 libfcn
= floattisf_libfunc
;
3599 else if (GET_MODE (to
) == DFmode
)
3601 if (GET_MODE (from
) == SImode
)
3602 libfcn
= floatsidf_libfunc
;
3603 else if (GET_MODE (from
) == DImode
)
3604 libfcn
= floatdidf_libfunc
;
3605 else if (GET_MODE (from
) == TImode
)
3606 libfcn
= floattidf_libfunc
;
3610 else if (GET_MODE (to
) == XFmode
)
3612 if (GET_MODE (from
) == SImode
)
3613 libfcn
= floatsixf_libfunc
;
3614 else if (GET_MODE (from
) == DImode
)
3615 libfcn
= floatdixf_libfunc
;
3616 else if (GET_MODE (from
) == TImode
)
3617 libfcn
= floattixf_libfunc
;
3621 else if (GET_MODE (to
) == TFmode
)
3623 if (GET_MODE (from
) == SImode
)
3624 libfcn
= floatsitf_libfunc
;
3625 else if (GET_MODE (from
) == DImode
)
3626 libfcn
= floatditf_libfunc
;
3627 else if (GET_MODE (from
) == TImode
)
3628 libfcn
= floattitf_libfunc
;
3637 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1,
3639 1, from
, GET_MODE (from
));
3640 insns
= get_insns ();
3643 emit_libcall_block (insns
, target
, value
,
3644 gen_rtx (FLOAT
, GET_MODE (to
), from
));
3649 /* Copy result to requested destination
3650 if we have been computing in a temp location. */
3654 if (GET_MODE (target
) == GET_MODE (to
))
3655 emit_move_insn (to
, target
);
3657 convert_move (to
, target
, 0);
3661 /* expand_fix: generate code to convert FROM to fixed point
3662 and store in TO. FROM must be floating point. */
3668 rtx temp
= gen_reg_rtx (GET_MODE (x
));
3669 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
3673 expand_fix (to
, from
, unsignedp
)
3674 register rtx to
, from
;
3677 enum insn_code icode
;
3678 register rtx target
= to
;
3679 enum machine_mode fmode
, imode
;
3683 /* We first try to find a pair of modes, one real and one integer, at
3684 least as wide as FROM and TO, respectively, in which we can open-code
3685 this conversion. If the integer mode is wider than the mode of TO,
3686 we can do the conversion either signed or unsigned. */
3688 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
3689 imode
= GET_MODE_WIDER_MODE (imode
))
3690 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3691 fmode
= GET_MODE_WIDER_MODE (fmode
))
3693 int doing_unsigned
= unsignedp
;
3695 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
3696 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
3697 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
3699 if (icode
!= CODE_FOR_nothing
)
3701 to
= protect_from_queue (to
, 1);
3702 from
= protect_from_queue (from
, 0);
3704 if (fmode
!= GET_MODE (from
))
3705 from
= convert_to_mode (fmode
, from
, 0);
3708 from
= ftruncify (from
);
3710 if (imode
!= GET_MODE (to
))
3711 target
= gen_reg_rtx (imode
);
3713 emit_unop_insn (icode
, target
, from
,
3714 doing_unsigned
? UNSIGNED_FIX
: FIX
);
3716 convert_move (to
, target
, unsignedp
);
3721 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3722 /* For an unsigned conversion, there is one more way to do it.
3723 If we have a signed conversion, we generate code that compares
3724 the real value to the largest representable positive number. If if
3725 is smaller, the conversion is done normally. Otherwise, subtract
3726 one plus the highest signed number, convert, and add it back.
3728 We only need to check all real modes, since we know we didn't find
3729 anything with a wider integer mode. */
3731 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
3732 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3733 fmode
= GET_MODE_WIDER_MODE (fmode
))
3734 /* Make sure we won't lose significant bits doing this. */
3735 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
3736 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
3740 REAL_VALUE_TYPE offset
;
3741 rtx limit
, lab1
, lab2
, insn
;
3743 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
3744 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
3745 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
3746 lab1
= gen_label_rtx ();
3747 lab2
= gen_label_rtx ();
3750 to
= protect_from_queue (to
, 1);
3751 from
= protect_from_queue (from
, 0);
3754 from
= force_not_mem (from
);
3756 if (fmode
!= GET_MODE (from
))
3757 from
= convert_to_mode (fmode
, from
, 0);
3759 /* See if we need to do the subtraction. */
3760 do_pending_stack_adjust ();
3761 emit_cmp_insn (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3762 emit_jump_insn (gen_bge (lab1
));
3764 /* If not, do the signed "fix" and branch around fixup code. */
3765 expand_fix (to
, from
, 0);
3766 emit_jump_insn (gen_jump (lab2
));
3769 /* Otherwise, subtract 2**(N-1), convert to signed number,
3770 then add 2**(N-1). Do the addition using XOR since this
3771 will often generate better code. */
3773 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
3774 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
3775 expand_fix (to
, target
, 0);
3776 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
3777 GEN_INT ((HOST_WIDE_INT
) 1 << (bitsize
- 1)),
3778 to
, 1, OPTAB_LIB_WIDEN
);
3781 emit_move_insn (to
, target
);
3785 /* Make a place for a REG_NOTE and add it. */
3786 insn
= emit_move_insn (to
, to
);
3787 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
3788 gen_rtx (UNSIGNED_FIX
, GET_MODE (to
),
3796 /* We can't do it with an insn, so use a library call. But first ensure
3797 that the mode of TO is at least as wide as SImode, since those are the
3798 only library calls we know about. */
3800 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
3802 target
= gen_reg_rtx (SImode
);
3804 expand_fix (target
, from
, unsignedp
);
3806 else if (GET_MODE (from
) == SFmode
)
3808 if (GET_MODE (to
) == SImode
)
3809 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
3810 else if (GET_MODE (to
) == DImode
)
3811 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
3812 else if (GET_MODE (to
) == TImode
)
3813 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
3817 else if (GET_MODE (from
) == DFmode
)
3819 if (GET_MODE (to
) == SImode
)
3820 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
3821 else if (GET_MODE (to
) == DImode
)
3822 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
3823 else if (GET_MODE (to
) == TImode
)
3824 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
3828 else if (GET_MODE (from
) == XFmode
)
3830 if (GET_MODE (to
) == SImode
)
3831 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
3832 else if (GET_MODE (to
) == DImode
)
3833 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
3834 else if (GET_MODE (to
) == TImode
)
3835 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
3839 else if (GET_MODE (from
) == TFmode
)
3841 if (GET_MODE (to
) == SImode
)
3842 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
3843 else if (GET_MODE (to
) == DImode
)
3844 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
3845 else if (GET_MODE (to
) == TImode
)
3846 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
3858 to
= protect_from_queue (to
, 1);
3859 from
= protect_from_queue (from
, 0);
3862 from
= force_not_mem (from
);
3866 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1, GET_MODE (to
),
3868 1, from
, GET_MODE (from
));
3869 insns
= get_insns ();
3872 emit_libcall_block (insns
, target
, value
,
3873 gen_rtx (unsignedp
? UNSIGNED_FIX
: FIX
,
3874 GET_MODE (to
), from
));
3877 if (GET_MODE (to
) == GET_MODE (target
))
3878 emit_move_insn (to
, target
);
3880 convert_move (to
, target
, 0);
3888 optab op
= (optab
) xmalloc (sizeof (struct optab
));
3890 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
3892 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
3893 op
->handlers
[i
].libfunc
= 0;
3896 if (code
!= UNKNOWN
)
3897 code_to_optab
[(int) code
] = op
;
3902 /* Initialize the libfunc fields of an entire group of entries in some
3903 optab. Each entry is set equal to a string consisting of a leading
3904 pair of underscores followed by a generic operation name followed by
3905 a mode name (downshifted to lower case) followed by a single character
3906 representing the number of operands for the given operation (which is
3907 usually one of the characters '2', '3', or '4').
3909 OPTABLE is the table in which libfunc fields are to be initialized.
3910 FIRST_MODE is the first machine mode index in the given optab to
3912 LAST_MODE is the last machine mode index in the given optab to
3914 OPNAME is the generic (string) name of the operation.
3915 SUFFIX is the character which specifies the number of operands for
3916 the given generic operation.
3920 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
3921 register optab optable
;
3922 register int first_mode
;
3923 register int last_mode
;
3924 register char *opname
;
3925 register char suffix
;
3928 register unsigned opname_len
= strlen (opname
);
3930 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
3931 mode
= (enum machine_mode
) ((int) mode
+ 1))
3933 register char *mname
= mode_name
[(int) mode
];
3934 register unsigned mname_len
= strlen (mname
);
3935 register char *libfunc_name
3936 = (char *) xmalloc (2 + opname_len
+ mname_len
+ 1 + 1);
3943 for (q
= opname
; *q
; )
3945 for (q
= mname
; *q
; q
++)
3946 *p
++ = tolower (*q
);
3949 optable
->handlers
[(int) mode
].libfunc
3950 = gen_rtx (SYMBOL_REF
, Pmode
, libfunc_name
);
3954 /* Initialize the libfunc fields of an entire group of entries in some
3955 optab which correspond to all integer mode operations. The parameters
3956 have the same meaning as similarly named ones for the `init_libfuncs'
3957 routine. (See above). */
3960 init_integral_libfuncs (optable
, opname
, suffix
)
3961 register optab optable
;
3962 register char *opname
;
3963 register char suffix
;
3965 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
3968 /* Initialize the libfunc fields of an entire group of entries in some
3969 optab which correspond to all real mode operations. The parameters
3970 have the same meaning as similarly named ones for the `init_libfuncs'
3971 routine. (See above). */
3974 init_floating_libfuncs (optable
, opname
, suffix
)
3975 register optab optable
;
3976 register char *opname
;
3977 register char suffix
;
3979 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
3982 /* Initialize the libfunc fields of an entire group of entries in some
3983 optab which correspond to all complex floating modes. The parameters
3984 have the same meaning as similarly named ones for the `init_libfuncs'
3985 routine. (See above). */
3988 init_complex_libfuncs (optable
, opname
, suffix
)
3989 register optab optable
;
3990 register char *opname
;
3991 register char suffix
;
3993 init_libfuncs (optable
, SCmode
, TCmode
, opname
, suffix
);
3996 /* Call this once to initialize the contents of the optabs
3997 appropriately for the current target machine. */
4005 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4007 for (p
= fixtab
[0][0];
4008 p
< fixtab
[0][0] + sizeof fixtab
/ sizeof (fixtab
[0][0][0]);
4010 *p
= CODE_FOR_nothing
;
4012 for (p
= fixtrunctab
[0][0];
4013 p
< fixtrunctab
[0][0] + sizeof fixtrunctab
/ sizeof (fixtrunctab
[0][0][0]);
4015 *p
= CODE_FOR_nothing
;
4017 for (p
= floattab
[0][0];
4018 p
< floattab
[0][0] + sizeof floattab
/ sizeof (floattab
[0][0][0]);
4020 *p
= CODE_FOR_nothing
;
4022 for (p
= extendtab
[0][0];
4023 p
< extendtab
[0][0] + sizeof extendtab
/ sizeof extendtab
[0][0][0];
4025 *p
= CODE_FOR_nothing
;
4027 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4028 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4030 #ifdef HAVE_conditional_move
4031 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4032 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4035 add_optab
= init_optab (PLUS
);
4036 sub_optab
= init_optab (MINUS
);
4037 smul_optab
= init_optab (MULT
);
4038 smul_highpart_optab
= init_optab (UNKNOWN
);
4039 umul_highpart_optab
= init_optab (UNKNOWN
);
4040 smul_widen_optab
= init_optab (UNKNOWN
);
4041 umul_widen_optab
= init_optab (UNKNOWN
);
4042 sdiv_optab
= init_optab (DIV
);
4043 sdivmod_optab
= init_optab (UNKNOWN
);
4044 udiv_optab
= init_optab (UDIV
);
4045 udivmod_optab
= init_optab (UNKNOWN
);
4046 smod_optab
= init_optab (MOD
);
4047 umod_optab
= init_optab (UMOD
);
4048 flodiv_optab
= init_optab (DIV
);
4049 ftrunc_optab
= init_optab (UNKNOWN
);
4050 and_optab
= init_optab (AND
);
4051 ior_optab
= init_optab (IOR
);
4052 xor_optab
= init_optab (XOR
);
4053 ashl_optab
= init_optab (ASHIFT
);
4054 ashr_optab
= init_optab (ASHIFTRT
);
4055 lshr_optab
= init_optab (LSHIFTRT
);
4056 rotl_optab
= init_optab (ROTATE
);
4057 rotr_optab
= init_optab (ROTATERT
);
4058 smin_optab
= init_optab (SMIN
);
4059 smax_optab
= init_optab (SMAX
);
4060 umin_optab
= init_optab (UMIN
);
4061 umax_optab
= init_optab (UMAX
);
4062 mov_optab
= init_optab (UNKNOWN
);
4063 movstrict_optab
= init_optab (UNKNOWN
);
4064 cmp_optab
= init_optab (UNKNOWN
);
4065 ucmp_optab
= init_optab (UNKNOWN
);
4066 tst_optab
= init_optab (UNKNOWN
);
4067 neg_optab
= init_optab (NEG
);
4068 abs_optab
= init_optab (ABS
);
4069 one_cmpl_optab
= init_optab (NOT
);
4070 ffs_optab
= init_optab (FFS
);
4071 sqrt_optab
= init_optab (SQRT
);
4072 sin_optab
= init_optab (UNKNOWN
);
4073 cos_optab
= init_optab (UNKNOWN
);
4074 strlen_optab
= init_optab (UNKNOWN
);
4076 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4078 movstr_optab
[i
] = CODE_FOR_nothing
;
4080 #ifdef HAVE_SECONDARY_RELOADS
4081 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4085 /* Fill in the optabs with the insns we support. */
4088 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
4089 /* This flag says the same insns that convert to a signed fixnum
4090 also convert validly to an unsigned one. */
4091 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4092 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4093 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
4096 #ifdef EXTRA_CC_MODES
4100 /* Initialize the optabs with the names of the library functions. */
4101 init_integral_libfuncs (add_optab
, "add", '3');
4102 init_floating_libfuncs (add_optab
, "add", '3');
4103 init_integral_libfuncs (sub_optab
, "sub", '3');
4104 init_floating_libfuncs (sub_optab
, "sub", '3');
4105 init_integral_libfuncs (smul_optab
, "mul", '3');
4106 init_floating_libfuncs (smul_optab
, "mul", '3');
4107 init_integral_libfuncs (sdiv_optab
, "div", '3');
4108 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4109 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4110 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4111 init_integral_libfuncs (smod_optab
, "mod", '3');
4112 init_integral_libfuncs (umod_optab
, "umod", '3');
4113 init_floating_libfuncs (flodiv_optab
, "div", '3');
4114 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4115 init_integral_libfuncs (and_optab
, "and", '3');
4116 init_integral_libfuncs (ior_optab
, "ior", '3');
4117 init_integral_libfuncs (xor_optab
, "xor", '3');
4118 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4119 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4120 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4121 init_integral_libfuncs (smin_optab
, "min", '3');
4122 init_floating_libfuncs (smin_optab
, "min", '3');
4123 init_integral_libfuncs (smax_optab
, "max", '3');
4124 init_floating_libfuncs (smax_optab
, "max", '3');
4125 init_integral_libfuncs (umin_optab
, "umin", '3');
4126 init_integral_libfuncs (umax_optab
, "umax", '3');
4127 init_integral_libfuncs (neg_optab
, "neg", '2');
4128 init_floating_libfuncs (neg_optab
, "neg", '2');
4129 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4130 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4132 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
4133 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4134 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4135 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4137 #ifdef MULSI3_LIBCALL
4138 smul_optab
->handlers
[(int) SImode
].libfunc
4139 = gen_rtx (SYMBOL_REF
, Pmode
, MULSI3_LIBCALL
);
4141 #ifdef MULDI3_LIBCALL
4142 smul_optab
->handlers
[(int) DImode
].libfunc
4143 = gen_rtx (SYMBOL_REF
, Pmode
, MULDI3_LIBCALL
);
4145 #ifdef MULTI3_LIBCALL
4146 smul_optab
->handlers
[(int) TImode
].libfunc
4147 = gen_rtx (SYMBOL_REF
, Pmode
, MULTI3_LIBCALL
);
4150 #ifdef DIVSI3_LIBCALL
4151 sdiv_optab
->handlers
[(int) SImode
].libfunc
4152 = gen_rtx (SYMBOL_REF
, Pmode
, DIVSI3_LIBCALL
);
4154 #ifdef DIVDI3_LIBCALL
4155 sdiv_optab
->handlers
[(int) DImode
].libfunc
4156 = gen_rtx (SYMBOL_REF
, Pmode
, DIVDI3_LIBCALL
);
4158 #ifdef DIVTI3_LIBCALL
4159 sdiv_optab
->handlers
[(int) TImode
].libfunc
4160 = gen_rtx (SYMBOL_REF
, Pmode
, DIVTI3_LIBCALL
);
4163 #ifdef UDIVSI3_LIBCALL
4164 udiv_optab
->handlers
[(int) SImode
].libfunc
4165 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVSI3_LIBCALL
);
4167 #ifdef UDIVDI3_LIBCALL
4168 udiv_optab
->handlers
[(int) DImode
].libfunc
4169 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVDI3_LIBCALL
);
4171 #ifdef UDIVTI3_LIBCALL
4172 udiv_optab
->handlers
[(int) TImode
].libfunc
4173 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVTI3_LIBCALL
);
4177 #ifdef MODSI3_LIBCALL
4178 smod_optab
->handlers
[(int) SImode
].libfunc
4179 = gen_rtx (SYMBOL_REF
, Pmode
, MODSI3_LIBCALL
);
4181 #ifdef MODDI3_LIBCALL
4182 smod_optab
->handlers
[(int) DImode
].libfunc
4183 = gen_rtx (SYMBOL_REF
, Pmode
, MODDI3_LIBCALL
);
4185 #ifdef MODTI3_LIBCALL
4186 smod_optab
->handlers
[(int) TImode
].libfunc
4187 = gen_rtx (SYMBOL_REF
, Pmode
, MODTI3_LIBCALL
);
4191 #ifdef UMODSI3_LIBCALL
4192 umod_optab
->handlers
[(int) SImode
].libfunc
4193 = gen_rtx (SYMBOL_REF
, Pmode
, UMODSI3_LIBCALL
);
4195 #ifdef UMODDI3_LIBCALL
4196 umod_optab
->handlers
[(int) DImode
].libfunc
4197 = gen_rtx (SYMBOL_REF
, Pmode
, UMODDI3_LIBCALL
);
4199 #ifdef UMODTI3_LIBCALL
4200 umod_optab
->handlers
[(int) TImode
].libfunc
4201 = gen_rtx (SYMBOL_REF
, Pmode
, UMODTI3_LIBCALL
);
4204 /* Define library calls for quad FP instructions */
4205 #ifdef ADDTF3_LIBCALL
4206 add_optab
->handlers
[(int) TFmode
].libfunc
4207 = gen_rtx (SYMBOL_REF
, Pmode
, ADDTF3_LIBCALL
);
4209 #ifdef SUBTF3_LIBCALL
4210 sub_optab
->handlers
[(int) TFmode
].libfunc
4211 = gen_rtx (SYMBOL_REF
, Pmode
, SUBTF3_LIBCALL
);
4213 #ifdef MULTF3_LIBCALL
4214 smul_optab
->handlers
[(int) TFmode
].libfunc
4215 = gen_rtx (SYMBOL_REF
, Pmode
, MULTF3_LIBCALL
);
4217 #ifdef DIVTF3_LIBCALL
4218 flodiv_optab
->handlers
[(int) TFmode
].libfunc
4219 = gen_rtx (SYMBOL_REF
, Pmode
, DIVTF3_LIBCALL
);
4221 #ifdef SQRTTF2_LIBCALL
4222 sqrt_optab
->handlers
[(int) TFmode
].libfunc
4223 = gen_rtx (SYMBOL_REF
, Pmode
, SQRTTF2_LIBCALL
);
4226 /* Use cabs for DC complex abs, since systems generally have cabs.
4227 Don't define any libcall for SCmode, so that cabs will be used. */
4228 abs_optab
->handlers
[(int) DCmode
].libfunc
4229 = gen_rtx (SYMBOL_REF
, Pmode
, "cabs");
4231 /* The ffs function operates on `int'. */
4232 #ifndef INT_TYPE_SIZE
4233 #define INT_TYPE_SIZE BITS_PER_WORD
4235 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)] .libfunc
4236 = gen_rtx (SYMBOL_REF
, Pmode
, "ffs");
4238 extendsfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfdf2");
4239 extendsfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfxf2");
4240 extendsftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsftf2");
4241 extenddfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddfxf2");
4242 extenddftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddftf2");
4244 truncdfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncdfsf2");
4245 truncxfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfsf2");
4246 trunctfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfsf2");
4247 truncxfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfdf2");
4248 trunctfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfdf2");
4250 memcpy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcpy");
4251 bcopy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bcopy");
4252 memcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcmp");
4253 bcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gcc_bcmp");
4254 memset_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memset");
4255 bzero_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bzero");
4257 eqhf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqhf2");
4258 nehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nehf2");
4259 gthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gthf2");
4260 gehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gehf2");
4261 lthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lthf2");
4262 lehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lehf2");
4264 eqsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqsf2");
4265 nesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nesf2");
4266 gtsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtsf2");
4267 gesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gesf2");
4268 ltsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltsf2");
4269 lesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lesf2");
4271 eqdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqdf2");
4272 nedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nedf2");
4273 gtdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtdf2");
4274 gedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gedf2");
4275 ltdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltdf2");
4276 ledf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ledf2");
4278 eqxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqxf2");
4279 nexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nexf2");
4280 gtxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtxf2");
4281 gexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gexf2");
4282 ltxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltxf2");
4283 lexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lexf2");
4285 eqtf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqtf2");
4286 netf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__netf2");
4287 gttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gttf2");
4288 getf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__getf2");
4289 lttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lttf2");
4290 letf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__letf2");
4292 /* Define library calls for quad FP instructions */
4293 #ifdef EQTF2_LIBCALL
4294 eqtf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, EQTF2_LIBCALL
);
4296 #ifdef NETF2_LIBCALL
4297 netf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, NETF2_LIBCALL
);
4299 #ifdef GTTF2_LIBCALL
4300 gttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, GTTF2_LIBCALL
);
4302 #ifdef GETF2_LIBCALL
4303 getf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, GETF2_LIBCALL
);
4305 #ifdef LTTF2_LIBCALL
4306 lttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, LTTF2_LIBCALL
);
4308 #ifdef LETF2_LIBCALL
4309 letf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, LETF2_LIBCALL
);
4312 floatsisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsisf");
4313 floatdisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdisf");
4314 floattisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattisf");
4316 floatsidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsidf");
4317 floatdidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdidf");
4318 floattidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattidf");
4320 floatsixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsixf");
4321 floatdixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdixf");
4322 floattixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattixf");
4324 floatsitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsitf");
4325 floatditf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatditf");
4326 floattitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattitf");
4328 fixsfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfsi");
4329 fixsfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfdi");
4330 fixsfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfti");
4332 fixdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfsi");
4333 fixdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfdi");
4334 fixdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfti");
4336 fixxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfsi");
4337 fixxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfdi");
4338 fixxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfti");
4340 fixtfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfsi");
4341 fixtfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfdi");
4342 fixtfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfti");
4344 fixunssfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfsi");
4345 fixunssfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfdi");
4346 fixunssfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfti");
4348 fixunsdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfsi");
4349 fixunsdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfdi");
4350 fixunsdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfti");
4352 fixunsxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfsi");
4353 fixunsxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfdi");
4354 fixunsxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfti");
4356 fixunstfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfsi");
4357 fixunstfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfdi");
4358 fixunstfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfti");
4360 /* Define library calls for quad FP instructions */
4361 #ifdef TRUNCTFSF2_LIBCALL
4362 trunctfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, TRUNCTFSF2_LIBCALL
);
4364 #ifdef TRUNCTFDF2_LIBCALL
4365 trunctfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, TRUNCTFDF2_LIBCALL
);
4367 #ifdef EXTENDSFTF2_LIBCALL
4368 extendsftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, EXTENDSFTF2_LIBCALL
);
4370 #ifdef EXTENDDFTF2_LIBCALL
4371 extenddftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, EXTENDDFTF2_LIBCALL
);
4373 #ifdef FLOATSITF2_LIBCALL
4374 floatsitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, FLOATSITF2_LIBCALL
);
4376 #ifdef FIX_TRUNCTFSI2_LIBCALL
4377 fixtfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, FIX_TRUNCTFSI2_LIBCALL
);
4379 #ifdef FIXUNS_TRUNCTFSI2_LIBCALL
4380 fixunstfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, FIXUNS_TRUNCTFSI2_LIBCALL
);
4383 #ifdef INIT_TARGET_OPTABS
4384 /* Allow the target to add more libcalls or rename some, etc. */
4391 /* SCO 3.2 apparently has a broken ldexp. */
4404 #endif /* BROKEN_LDEXP */