/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];
/* Tables of patterns for extending one integer mode to another.  */
enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];

/* Tables of patterns for converting between fixed and floating point.  */
enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
static int add_equal_note	PARAMS ((rtx, rtx, enum rtx_code, rtx, rtx));
static rtx widen_operand	PARAMS ((rtx, enum machine_mode,
					 enum machine_mode, int, int));
static int expand_cmplxdiv_straight PARAMS ((rtx, rtx, rtx, rtx,
					     rtx, rtx, enum machine_mode,
					     int, enum optab_methods,
					     enum mode_class, optab));
static int expand_cmplxdiv_wide PARAMS ((rtx, rtx, rtx, rtx,
					 rtx, rtx, enum machine_mode,
					 int, enum optab_methods,
					 enum mode_class, optab));
static void prepare_cmp_insn PARAMS ((rtx *, rtx *, enum rtx_code *, rtx,
				      enum machine_mode *, int *,
				      enum can_compare_purpose));
static enum insn_code can_fix_p	PARAMS ((enum machine_mode, enum machine_mode,
					 int, int *));
static enum insn_code can_float_p PARAMS ((enum machine_mode,
					   enum machine_mode, int));
static rtx ftruncify	PARAMS ((rtx));
static optab new_optab	PARAMS ((void));
static inline optab init_optab	PARAMS ((enum rtx_code));
static inline optab init_optabv	PARAMS ((enum rtx_code));
static inline int complex_part_zero_p PARAMS ((rtx, enum mode_class,
					       enum machine_mode));
static void init_libfuncs PARAMS ((optab, int, int, const char *, int));
static void init_integral_libfuncs PARAMS ((optab, const char *, int));
static void init_floating_libfuncs PARAMS ((optab, const char *, int));
#ifdef HAVE_conditional_trap
static void init_traps PARAMS ((void));
#endif
static void emit_cmp_and_jump_insn_1 PARAMS ((rtx, rtx, enum machine_mode,
					      enum rtx_code, int, rtx));
static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *,
					   enum machine_mode *, int *));
/* Add a REG_EQUAL note to the last insn in SEQ.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (seq, target, code, op0, op1)
     rtx seq;
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
{
  rtx set, note;
  int i;

  if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
       && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
      || GET_CODE (seq) != SEQUENCE
      || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0
      || GET_CODE (target) == ZERO_EXTRACT
      || (! rtx_equal_p (SET_DEST (set), target)
	  /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
	     SUBREG.  */
	  && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	      || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),
				target))))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    for (i = XVECLEN (seq, 0) - 2; i >= 0; i--)
      if (reg_set_p (target, XVECEXP (seq, 0, i)))
	return 0;

  if (GET_RTX_CLASS (code) == '1')
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1),
		       REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (op, mode, oldmode, unsignedp, no_extend)
     rtx op;
     enum machine_mode mode, oldmode;
     int unsignedp;
     int no_extend;
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
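
/* For instance, when expand_binop widens a QImode AND into an SImode
   operation it can pass NO_EXTEND as nonzero: the upper bits of the
   paradoxical SUBREG are garbage, but only the low byte of the result
   is ever used.  For a right shift the high-order bits do matter, so
   NO_EXTEND must be zero there.  (Illustrative note.)  */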
/* Test whether either the real or imaginary part of a complex floating
   point number is 0.0, so that it can be ignored (when compiling
   with -funsafe-math-optimizations).  */

static inline int
complex_part_zero_p (part, class, submode)
     rtx part;
     enum mode_class class;
     enum machine_mode submode;
{
  return
    (flag_unsafe_math_optimizations
     && class == MODE_COMPLEX_FLOAT
     && part == CONST0_RTX (submode));
}
/* Generate code to perform a straightforward complex divide.  */

static int
expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode,
			  unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     int unsignedp;
     enum optab_methods methods;
     enum mode_class class;
     optab binoptab;
{
  rtx divisor;
  rtx real_t, imag_t;
  rtx temp1, temp2;
  rtx res;
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
    {
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;
    }

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);

  if (imag0 != 0)
    imag0 = force_reg (submode, imag0);

  imag1 = force_reg (submode, imag1);

  /* Divisor: c*c + d*d.  */
  temp1 = expand_binop (submode, this_mul_optab, real1, real1,
			NULL_RTX, unsignedp, methods);

  temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
			NULL_RTX, unsignedp, methods);

  if (temp1 == 0 || temp2 == 0)
    return 0;

  divisor = expand_binop (submode, this_add_optab, temp1, temp2,
			  NULL_RTX, unsignedp, methods);
  if (divisor == 0)
    return 0;

  if (complex_part_zero_p (imag0, class, submode))
    {
      /* Mathematically, ((a)(c-id))/divisor.  */
      /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)).  */

      /* Calculate the dividend.  */
      real_t = expand_binop (submode, this_mul_optab, real0, real1,
			     NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)
	return 0;

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
			    NULL_RTX, unsignedp);
    }
  else
    {
      /* Mathematically, ((a+ib)(c-id))/divisor.  */
      /* Calculate the dividend.  */
      temp1 = expand_binop (submode, this_mul_optab, real0, real1,
			    NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
			    NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)
	return 0;

      real_t = expand_binop (submode, this_add_optab, temp1, temp2,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
			    NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
			    NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)
	return 0;

      imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)
	return 0;
    }

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);

  if (res == 0)
    return 0;

  if (res != realr)
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);

  if (res == 0)
    return 0;

  if (res != imagr)
    emit_move_insn (imagr, res);

  return 1;
}
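
/* Worked example of the straightforward method (illustrative numbers):
   (3+4i)/(1+2i) has divisor 1*1 + 2*2 = 5 and dividend
   (3*1 + 4*2) + i(4*1 - 3*2) = 11 - 2i, giving 11/5 - (2/5)i.
   The weakness is that c*c + d*d can overflow even when the quotient
   itself is representable; expand_cmplxdiv_wide below addresses that.  */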
/* Generate code to perform a wide-input-range-acceptable complex divide.  */

static int
expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode,
		      unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     int unsignedp;
     enum optab_methods methods;
     enum mode_class class;
     optab binoptab;
{
  rtx ratio, divisor;
  rtx real_t, imag_t;
  rtx temp1, temp2, lab1, lab2;
  enum machine_mode mode;
  rtx res;
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
    {
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;
    }

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);

  if (imag0 != 0)
    imag0 = force_reg (submode, imag0);

  imag1 = force_reg (submode, imag1);

  /* XXX What's an "unsigned" complex number?  */
  temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
  temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);

  if (temp1 == 0 || temp2 == 0)
    return 0;

  mode = GET_MODE (temp1);
  lab1 = gen_label_rtx ();
  emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
			   mode, unsignedp, lab1);

  /* |c| >= |d|; use ratio d/c to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, imag1, real1,
			  NULL_RTX, unsignedp, methods);
  else
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			   imag1, real1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
			NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, real1,
			  NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

  if (complex_part_zero_p (imag0, class, submode))
    {
      /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)).  */

      real_t = real0;

      imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
			     NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
			    NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)
	return 0;
    }
  else
    {
      /* Compute (a+ib)/(c+id) as
	 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)).  */

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
			    NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, real0,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
			    NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)
	return 0;
    }

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);

  if (res == 0)
    return 0;

  if (res != realr)
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);

  if (res == 0)
    return 0;

  if (res != imagr)
    emit_move_insn (imagr, res);

  lab2 = gen_label_rtx ();
  emit_jump_insn (gen_jump (lab2));
  emit_barrier ();

  emit_label (lab1);

  /* |d| > |c|; use ratio c/d to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, real1, imag1,
			  NULL_RTX, unsignedp, methods);
  else
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			   real1, imag1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
			NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, imag1,
			  NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

  if (complex_part_zero_p (imag0, class, submode))
    {
      /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d).  */

      real_t = expand_binop (submode, this_mul_optab, real0, ratio,
			     NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, real0,
			    NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)
	return 0;
    }
  else
    {
      /* Compute (a+ib)/(c+id) as
	 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d).  */

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
			    NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, imag0,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
			    NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)
	return 0;
    }

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);

  if (res == 0)
    return 0;

  if (res != realr)
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);

  if (res == 0)
    return 0;

  if (res != imagr)
    emit_move_insn (imagr, res);

  emit_label (lab2);

  return 1;
}
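
/* Why the scaling helps (illustrative, assuming IEEE single precision):
   dividing (1e19 + 0i) by (2e19 + 2e19i) with the straightforward method
   needs c*c + d*d = 8e38, which overflows to infinity and makes both
   result parts zero.  With ratio = d/c = 1 the scaled divisor is
   c + d*ratio = 4e19, and the correct quotient 0.25 - 0.25i is produced
   from intermediates no larger than the operands themselves.  */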
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (mode, code, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum rtx_code code;
     rtx op0, op1;
     rtx target;
     int unsignedp;
     enum optab_methods methods;
{
  optab binop = code_to_optab[(int) code];
  if (binop == 0)
    abort ();

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
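
/* Hedged usage sketch (not taken from a real caller): code that wants
   "target = x + y" in SImode without naming an optab might write

     temp = expand_simple_binop (SImode, PLUS, x, y, target, 0,
				 OPTAB_LIB_WIDEN);
     if (temp != target)
       emit_move_insn (target, temp);

   treating the returned rtx exactly as expand_binop callers do.  */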
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     optab binoptab;
     rtx op0, op1;
     rtx target;
     int unsignedp;
     enum optab_methods methods;
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
		  || binoptab->code == ASHIFTRT
		  || binoptab->code == LSHIFTRT
		  || binoptab->code == ROTATE
		  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);
  if (target)
    target = protect_from_queue (target, 1);

  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == 'c'
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (((target == 0 || GET_CODE (target) == REG)
	   ? ((GET_CODE (op1) == REG
	       && GET_CODE (op0) != REG)
	      || target == op1)
	   : rtx_equal_p (op1, target))
	  || GET_CODE (op0) == CONST_INT)
	{
	  temp = op1;
	  op1 = op0;
	  op0 = temp;
	}
    }

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
	 if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
	{
	  if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
	      && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
	    {
	      rtx tmp;

	      tmp = op0; op0 = op1; op1 = tmp;
	      tmp = xop0; xop0 = xop1; xop1 = tmp;
	    }
	}

      /* In case the insn wants input operands in modes different from
	 the result, convert the operands.  It would seem that we
	 don't need to convert CONST_INTs, but we do, so that they're
	 properly sign-extended for their modes; we choose the
	 widest mode between mode and mode[01], so that, in a widening
	 operation, we call convert_modes with different FROM and TO
	 modes, which ensures the value is sign-extended.  Shift
	 operations are an exception, because the second operand need
	 not be extended to the mode of the result.  */
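
      /* Concretely (illustrative): a CONST_INT carries no mode of its own,
	 so the FROM mode handed to convert_modes is what decides the
	 result.  Zero-extending the QImode constant -1 (bit pattern 0xff)
	 into an SImode operand must yield 255, which only happens when
	 convert_modes is told the value was QImode; with FROM equal to TO
	 it would simply stay -1.  */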
      if (GET_MODE (op0) != mode0
	  && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode0)
			      ? mode
			      : mode0,
			      xop0, unsignedp);

      if (GET_MODE (xop1) != mode1
	  && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : (GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode1)
				 ? mode
				 : mode1),
			      xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
	 pseudo regs.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
	  && mode0 != VOIDmode)
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
	  && mode1 != VOIDmode)
	xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
	{
	  /* If PAT is a multi-insn sequence, try to add an appropriate
	     REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	     operand, call ourselves again, this time without a target.  */
	  if (GET_CODE (pat) == SEQUENCE
	      && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
	    {
	      delete_insns_since (last);
	      return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
				   unsignedp, methods);
	    }

	  emit_insn (pat);
	  return temp;
	}
      else
	delete_insns_since (last);
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
	   ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
	  != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT)
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& (((unsignedp ? umul_widen_optab : smul_widen_optab)
		     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */
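
	    /* For example (illustrative), a QImode addition can be carried
	       out as an SImode addition of operands whose upper 24 bits are
	       garbage: the low 8 bits of the SImode sum are exactly the
	       QImode sum, and only those bits survive the final
	       gen_lowpart.  A right shift has no such property, which is
	       why it is not in the list below.  */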
	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& class == MODE_INT)
	      no_extend = 1;

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (class != MODE_INT)
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  if (binoptab->code != UNKNOWN)
	    equiv_value
	      = gen_rtx_fmt_ee (binoptab->code, mode,
				copy_rtx (op0), copy_rtx (op1));
	  else
	    equiv_value = 0;

	  emit_no_conflict_block (insns, target, op0, op1, equiv_value);
	  return target;
	}
      else
	delete_insns_since (last);
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, inter, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = binoptab == ashl_optab;
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count >= BITS_PER_WORD)
	{
	  inter = expand_binop (word_mode, binoptab,
				outof_input,
				GEN_INT (shift_count - BITS_PER_WORD),
				into_target, unsignedp, next_methods);

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  /* For a signed right shift, we must fill the word we are shifting
	     out of with copies of the sign bit.  Otherwise it is zeroed.  */
	  if (inter != 0 && binoptab != ashr_optab)
	    inter = CONST0_RTX (word_mode);
	  else if (inter != 0)
	    inter = expand_binop (word_mode, binoptab,
				  outof_input,
				  GEN_INT (BITS_PER_WORD - 1),
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}
      else
	{
	  rtx carries;
	  optab reverse_unsigned_shift, unsigned_shift;

	  /* For a shift of less than BITS_PER_WORD, to compute the carry,
	     we must do a logical shift in the opposite direction of the
	     desired shift.  */

	  reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);

	  /* For a shift of less than BITS_PER_WORD, to compute the word
	     shifted towards, we need to unsigned shift the orig value of
	     this word.  */

	  unsigned_shift = (left_shift ? ashl_optab : lshr_optab);

	  carries = expand_binop (word_mode, reverse_unsigned_shift,
				  outof_input,
				  GEN_INT (BITS_PER_WORD - shift_count),
				  0, unsignedp, next_methods);

	  if (carries == 0)
	    inter = 0;
	  else
	    {
	      inter = expand_binop (word_mode, unsigned_shift, into_input,
				    op1, 0, unsignedp, next_methods);

	      if (inter != 0)
		inter = expand_binop (word_mode, ior_optab, carries, inter,
				      into_target, unsignedp, next_methods);
	    }

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  if (inter != 0)
	    inter = expand_binop (word_mode, binoptab, outof_input,
				  op1, outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  if (binoptab->code != UNKNOWN)
	    equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
	  else
	    equiv_value = 0;

	  emit_no_conflict_block (insns, target, op0, op1, equiv_value);
	  return target;
	}
    }
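
  /* Worked example (illustrative, 32-bit words): a DImode left shift by 10
     is built as
       high_result = (high_input << 10) | (low_input >> 22)
       low_result  =  low_input << 10
     while a left shift by 40 (>= BITS_PER_WORD) degenerates to
       high_result = low_input << 8,  low_result = 0.
     For an arithmetic right shift the vacated word is instead filled with
     copies of the sign bit, as handled above.  */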
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2*BITS_PER_WORD - shift_count);
	    }
	  else
	    {
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);
	    }

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     into_target, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      outof_target, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  if (binoptab->code != UNKNOWN)
	    equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
	  else
	    equiv_value = 0;

	  /* We can't make this a no conflict block if this is a word swap,
	     because the word swap case fails if the input and output values
	     are in the same register.  */
	  if (shift_count != BITS_PER_WORD)
	    emit_no_conflict_block (insns, target, op0, op1, equiv_value);
	  else
	    emit_insns (insns);

	  return target;
	}
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If
	 STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
	 it is the one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || GET_CODE (target) != REG)
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (GET_CODE (target) == REG)
	emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	    {
	      rtx temp = emit_move_insn (target, xtarget);

	      set_unique_reg_note (temp,
				   REG_EQUAL,
				   gen_rtx_fmt_ee (binoptab->code, mode,
						   copy_rtx (xop0),
						   copy_rtx (xop1)));
	    }
	  else
	    target = xtarget;

	  return target;
	}
      else
	delete_insns_since (last);
    }
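
  /* Numeric illustration: adding the DImode values 0x00000001ffffffff and
     0x0000000200000001 on a 32-bit target first adds the low words
     (0xffffffff + 0x00000001 = 0x00000000, and the store-flag sees
     sum < operand, i.e. a carry of 1), then adds the high words plus
     that carry: 1 + 2 + 1 = 4, giving 0x0000000400000000.  */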
  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
			 _______________________
			[__op0_high_|__op0_low__]
			 _______________________
	*		[__op1_high_|__op1_low__]
	_______________________________________________
			 _______________________
    (1)			[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]


    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
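
  /* Worked example (illustrative, BITS_PER_WORD == 16): for
     0x00018000 * 0x00020001 the exact product is 0x300018000, of which
     only the low 32 bits 0x00018000 are wanted.
       (1)  0x8000 * 0x0001 = 0x00008000          (unsigned widening)
       (2a) 0x8000 * 0x0002 -> low word 0x0000    (non-widening)
       (2b) 0x0001 * 0x0001 -> low word 0x0001    (non-widening)
     high word = 0x0000 + 0x0000 + 0x0001 = 0x0001, low word = 0x8000.
     With only a signed widening multiply, (1) comes out as 0xffff8000
     (0x8000 is treated as -32768); the shift trick adds 0x8000 >> 15 = 1
     to op0_high before (2b), making it 0x0002 * 0x0001, and the high word
     is again 0xffff + 0x0000 + 0x0002 = 0x0001 modulo 2**16.  */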
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
	   != CODE_FOR_nothing)
	  || (smul_widen_optab->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing)))
    {
      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);
      rtx product = 0;
      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
	 prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
	  || (target != 0 && GET_CODE (target) != REG))
	target = 0;

      /* Multiply the two lower words to get a double-word product.
	 If unsigned widening multiplication is available, use that;
	 otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
				  target, 1, OPTAB_DIRECT);

	  /* If we didn't succeed, delete everything we did so far.  */
	  if (product == 0)
	    delete_insns_since (last);
	  else
	    op0_xhigh = op0_high, op1_xhigh = op1_high;
	}

      if (product == 0
	  && smul_widen_optab->handlers[(int) mode].insn_code
	       != CODE_FOR_nothing)
	{
	  rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
	  product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
				  target, 1, OPTAB_DIRECT);
	  op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
				    NULL_RTX, 1, next_methods);
	  if (op0_xhigh)
	    op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
				      op0_xhigh, op0_xhigh, 0, next_methods);
	  else
	    {
	      op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
					NULL_RTX, 0, next_methods);
	      if (op0_xhigh)
		op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
					  op0_xhigh, op0_xhigh, 0,
					  next_methods);
	    }

	  op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
				    NULL_RTX, 1, next_methods);
	  if (op1_xhigh)
	    op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
				      op1_xhigh, op1_xhigh, 0, next_methods);
	  else
	    {
	      op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
					NULL_RTX, 0, next_methods);
	      if (op1_xhigh)
		op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
					  op1_xhigh, op1_xhigh, 0,
					  next_methods);
	    }
	}

      /* If we have been able to directly compute the product of the
	 low-order words of the operands and perform any required adjustments
	 of the operands, we proceed by trying two more multiplications
	 and then computing the appropriate sum.

	 We have checked above that the required addition is provided.
	 Full-word addition will normally always succeed, especially if
	 it is provided at all, so we don't worry about its failure.  The
	 multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)
	{
	  rtx product_high = operand_subword (product, high, 1, mode);
	  rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
				   NULL_RTX, 0, OPTAB_DIRECT);

	  if (!REG_P (product_high))
	    product_high = force_reg (word_mode, product_high);

	  if (temp != 0)
	    temp = expand_binop (word_mode, add_optab, temp, product_high,
				 product_high, 0, next_methods);

	  if (temp != 0 && temp != product_high)
	    emit_move_insn (product_high, temp);

	  if (temp != 0)
	    temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
				 NULL_RTX, 0, OPTAB_DIRECT);

	  if (temp != 0)
	    temp = expand_binop (word_mode, add_optab, temp,
				 product_high, product_high,
				 0, next_methods);

	  if (temp != 0 && temp != product_high)
	    emit_move_insn (product_high, temp);

	  emit_move_insn (operand_subword (product, high, 1, mode), product_high);

	  if (temp != 0)
	    {
	      if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
		{
		  temp = emit_move_insn (product, product);
		  set_unique_reg_note (temp,
				       REG_EQUAL,
				       gen_rtx_fmt_ee (MULT, mode,
						       copy_rtx (op0),
						       copy_rtx (op1)));
		}

	      return product;
	    }
	}

      /* If we get here, we couldn't do it for some reason even though we
	 originally thought we could.  Delete anything we've emitted in
	 trying to do it.  */

      delete_insns_since (last);
    }
  /* We need to open-code the complex type operations: '+, -, * and /'.  */

  /* At this point we allow operations between two similar complex
     numbers, and also if one of the operands is not a complex number
     but rather of MODE_FLOAT or MODE_INT.  However, the caller
     must make sure that the MODE of the non-complex operand matches
     the SUBMODE of the complex operand.  */

  if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
    {
      rtx real0 = 0, imag0 = 0;
      rtx real1 = 0, imag1 = 0;
      rtx realr, imagr, res;
      rtx seq;
      rtx equiv_value;
      int ok = 0;

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode
	= mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
			 class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
			 0);

      if (submode == BLKmode)
	abort ();

      if (target == 0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      realr = gen_realpart (submode, target);
      imagr = gen_imagpart (submode, target);

      if (GET_MODE (op0) == mode)
	{
	  real0 = gen_realpart (submode, op0);
	  imag0 = gen_imagpart (submode, op0);
	}
      else
	real0 = op0;

      if (GET_MODE (op1) == mode)
	{
	  real1 = gen_realpart (submode, op1);
	  imag1 = gen_imagpart (submode, op1);
	}
      else
	real1 = op1;

      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
	abort ();

      switch (binoptab->code)
	{
	case PLUS:
	  /* (a+ib) + (c+id) = (a+c) + i(b+d) */
	case MINUS:
	  /* (a+ib) - (c+id) = (a-c) + i(b-d) */
	  res = expand_binop (submode, binoptab, real0, real1,
			      realr, unsignedp, methods);

	  if (res == 0)
	    break;
	  else if (res != realr)
	    emit_move_insn (realr, res);

	  if (!complex_part_zero_p (imag0, class, submode)
	      && !complex_part_zero_p (imag1, class, submode))
	    res = expand_binop (submode, binoptab, imag0, imag1,
				imagr, unsignedp, methods);
	  else if (!complex_part_zero_p (imag0, class, submode))
	    res = imag0;
	  else if (binoptab->code == MINUS)
	    res = expand_unop (submode,
			       binoptab == subv_optab ? negv_optab : neg_optab,
			       imag1, imagr, unsignedp);
	  else
	    res = imag1;

	  if (res == 0)
	    break;
	  else if (res != imagr)
	    emit_move_insn (imagr, res);

	  ok = 1;
	  break;

	case MULT:
	  /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */

	  if (!complex_part_zero_p (imag0, class, submode)
	      && !complex_part_zero_p (imag1, class, submode))
	    {
	      rtx temp1, temp2;

	      /* Don't fetch these from memory more than once.  */
	      real0 = force_reg (submode, real0);
	      real1 = force_reg (submode, real1);
	      imag0 = force_reg (submode, imag0);
	      imag1 = force_reg (submode, imag1);

	      temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
				    unsignedp, methods);

	      temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
				    unsignedp, methods);

	      if (temp1 == 0 || temp2 == 0)
		break;

	      res = (expand_binop
		     (submode,
		      binoptab == smulv_optab ? subv_optab : sub_optab,
		      temp1, temp2, realr, unsignedp, methods));

	      if (res == 0)
		break;
	      else if (res != realr)
		emit_move_insn (realr, res);

	      temp1 = expand_binop (submode, binoptab, real0, imag1,
				    NULL_RTX, unsignedp, methods);

	      temp2 = expand_binop (submode, binoptab, real1, imag0,
				    NULL_RTX, unsignedp, methods);

	      if (temp1 == 0 || temp2 == 0)
		break;

	      res = (expand_binop
		     (submode,
		      binoptab == smulv_optab ? addv_optab : add_optab,
		      temp1, temp2, imagr, unsignedp, methods));

	      if (res == 0)
		break;
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	      ok = 1;
	    }
	  else
	    {
	      /* Don't fetch these from memory more than once.  */
	      real0 = force_reg (submode, real0);
	      real1 = force_reg (submode, real1);

	      res = expand_binop (submode, binoptab, real0, real1,
				  realr, unsignedp, methods);
	      if (res == 0)
		break;
	      else if (res != realr)
		emit_move_insn (realr, res);

	      if (!complex_part_zero_p (imag0, class, submode))
		res = expand_binop (submode, binoptab,
				    real1, imag0, imagr, unsignedp, methods);
	      else
		res = expand_binop (submode, binoptab,
				    real0, imag1, imagr, unsignedp, methods);

	      if (res == 0)
		break;
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	      ok = 1;
	    }
	  break;

	case DIV:
	  /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */

	  if (complex_part_zero_p (imag1, class, submode))
	    {
	      /* (a+ib) / (c+i0) = (a/c) + i(b/c) */

	      /* Don't fetch these from memory more than once.  */
	      real1 = force_reg (submode, real1);

	      /* Simply divide the real and imaginary parts by `c'.  */
	      if (class == MODE_COMPLEX_FLOAT)
		res = expand_binop (submode, binoptab, real0, real1,
				    realr, unsignedp, methods);
	      else
		res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
				     real0, real1, realr, unsignedp);

	      if (res == 0)
		break;
	      else if (res != realr)
		emit_move_insn (realr, res);

	      if (class == MODE_COMPLEX_FLOAT)
		res = expand_binop (submode, binoptab, imag0, real1,
				    imagr, unsignedp, methods);
	      else
		res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
				     imag0, real1, imagr, unsignedp);

	      if (res == 0)
		break;
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	      ok = 1;
	    }
	  else
	    {
	      switch (flag_complex_divide_method)
		{
		case 0:
		  ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
						 realr, imagr, submode,
						 unsignedp, methods,
						 class, binoptab);
		  break;

		case 1:
		  ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
					     realr, imagr, submode,
					     unsignedp, methods,
					     class, binoptab);
		  break;

		default:
		  abort ();
		}
	    }
	  break;

	default:
	  abort ();
	}

      seq = get_insns ();
      end_sequence ();

      if (ok)
	{
	  if (binoptab->code != UNKNOWN)
	    equiv_value
	      = gen_rtx_fmt_ee (binoptab->code, mode,
				copy_rtx (op0), copy_rtx (op1));
	  else
	    equiv_value = 0;

	  emit_no_conflict_block (seq, target, op0, op1, equiv_value);

	  return target;
	}
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
	{
	  op1_mode = word_mode;
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (word_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((binoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && binoptab->handlers[(int) wider_mode].libfunc))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (class != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
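
/* The reasoning, illustrated: after zero extension both operands are
   nonnegative in the wider mode, and on nonnegative values the signed
   form of a divide, multiply, shift, etc. produces the same result as
   the unsigned form -- e.g. 200u / 3u and 200 / 3 agree once both
   operands live in SImode.  */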
rtx
sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     optab uoptab, soptab;
     rtx op0, op1, target;
     int unsignedp;
     enum optab_methods methods;
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;

  return expand_binop (mode, uoptab, op0, op1, target,
		       unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
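
/* A hedged illustration: a target providing a divmodsi4 pattern, which
   computes quotient and remainder in one instruction, would be driven
   through this routine with TARG0 receiving the quotient, TARG1 the
   remainder, and a scratch pseudo standing in for whichever result the
   caller did not ask for.  */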
int
expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp)
     optab binoptab;
     rtx op0, op1, targ0, targ1;
     int unsignedp;
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (targ0)
    targ0 = protect_from_queue (targ0, 1);
  else
    targ0 = gen_reg_rtx (mode);

  if (targ1)
    targ1 = protect_from_queue (targ1, 1);
  else
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case this insn wants input operands in modes different from the
	 result, convert the operands.  */
      if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1)
	xop1 = convert_to_mode (mode1, xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
	  || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
	abort ();

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (mode, code, op0, target, unsignedp)
     enum machine_mode mode;
     enum rtx_code code;
     rtx op0;
     rtx target;
     int unsignedp;
{
  optab unop = code_to_optab[(int) code];
  if (unop == 0)
    abort ();

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

expand_unop (mode, unoptab, op0, target, unsignedp)
     enum machine_mode mode;

  enum mode_class class;
  enum machine_mode wider_mode;
  rtx last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op0 = force_not_mem (op0);
  target = protect_from_queue (target, 1);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

      temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (GET_CODE (pat) == SEQUENCE
          && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
          delete_insns_since (last);
          return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,

            if (class != MODE_INT)
                target = gen_reg_rtx (mode);
                convert_move (target, temp, 0);

              return gen_lowpart (mode, temp);

            delete_insns_since (last);
  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();
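      /* Illustrative sketch: the word-at-a-time strategy above, written in
         plain C for a value made of two machine words, assuming 32-bit words:

           void not_64 (const unsigned int src[2], unsigned int dst[2])
           {
             dst[0] = ~src[0];   // low word
             dst[1] = ~src[1];   // high word
           }

         Each loop iteration above emits one word_mode NOT, and the pieces
         are then wrapped in a no-conflict block so later passes know the
         word stores do not interfere with the multi-word input.  */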
      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,
  /* Open-code the complex negation operation.  */
  else if (unoptab->code == NEG
           && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode
        = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
                         class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,

      if (submode == BLKmode)

      target = gen_reg_rtx (mode);

      target_piece = gen_imagpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_imagpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);

      target_piece = gen_realpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_realpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);
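      /* Illustrative sketch: complex negation reduces to negating the two
         parts independently, as in this plain-C equivalent (hypothetical
         struct representation assumed):

           struct cplx { double re, im; };

           struct cplx cneg (struct cplx a)
           {
             struct cplx r;
             r.re = -a.re;   // negate the real part
             r.im = -a.im;   // negate the imaginary part
             return r;
           }

         The code above does exactly this, one expand_unop call per part,
         writing each result straight into the matching piece of TARGET.  */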
      emit_no_conflict_block (seq, target, op0, 0,
                              gen_rtx_fmt_e (unoptab->code, mode,
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 1, op0, mode);
      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, mode, op0));
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,

              if (class != MODE_INT)
                  target = gen_reg_rtx (mode);
                  convert_move (target, temp, 0);

                return gen_lowpart (mode, temp);

              delete_insns_since (last);

  /* If there is no negate operation, try doing a subtract from zero.
     The US Software GOFAST library needs this.  */
  if (unoptab->code == NEG)

      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

expand_abs (mode, op0, target, result_unsignedp, safe)
     enum machine_mode mode;
     int result_unsignedp;

    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,

      delete_insns_since (last);

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
      temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                           temp, extended, target, 0, OPTAB_LIB_WIDEN);
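      /* Illustrative sketch: the branch-free absolute-value identity used
         above, written in plain C for a 32-bit int, assuming an arithmetic
         right shift:

           int iabs32 (int x)
           {
             int m = x >> 31;      // all ones if x < 0, else all zeros
             return (x ^ m) - m;   // flip the bits when negative, then add 1
           }

         For x = -5: m = -1, (x ^ m) = 4, and 4 - (-1) = 5.  */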
  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && GET_CODE (op0) == REG
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
      || (GET_CODE (target) == REG
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                             NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
  emit_move_insn (target, op0);
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   UNSIGNEDP is relevant for complex integer modes.  */

expand_complex_abs (mode, op0, target, unsignedp)
     enum machine_mode mode;

  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  optab this_abs_optab;

  /* Find the correct mode for the real and imaginary parts.  */
  enum machine_mode submode
    = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
                     class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,

  if (submode == BLKmode)

  op0 = protect_from_queue (op0, 0);
  op0 = force_not_mem (op0);

  last = get_last_insn ();
  target = protect_from_queue (target, 1);

  this_abs_optab = ! unsignedp && flag_trapv
                   && (GET_MODE_CLASS (mode) == MODE_INT)
                   ? absv_optab : abs_optab;

  if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

      temp = gen_reg_rtx (submode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
        temp = gen_reg_rtx (submode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (GET_CODE (pat) == SEQUENCE
          && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
          delete_insns_since (last);
          return expand_unop (mode, this_abs_optab, op0, NULL_RTX,

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      if (this_abs_optab->handlers[(int) wider_mode].insn_code
          != CODE_FOR_nothing)
          xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
          temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

          if (class != MODE_COMPLEX_INT)
              target = gen_reg_rtx (submode);
              convert_move (target, temp, 0);

            return gen_lowpart (submode, temp);

          delete_insns_since (last);
  /* Open-code the complex absolute-value operation
     if we can open-code sqrt.  Otherwise it's not worth while.  */
  if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing

      rtx real, imag, total;

      real = gen_realpart (submode, op0);
      imag = gen_imagpart (submode, op0);

      /* Square both parts.  */
      real = expand_mult (submode, real, real, NULL_RTX, 0);
      imag = expand_mult (submode, imag, imag, NULL_RTX, 0);

      /* Sum the parts.  */
      total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
                            0, OPTAB_LIB_WIDEN);

      /* Get sqrt in TARGET.  Set TARGET to where the result is.  */
      target = expand_unop (submode, sqrt_optab, total, target, 0);

      delete_insns_since (last);
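      /* Illustrative sketch: the open-coded sequence above computes the
         textbook magnitude |a + bi| = sqrt(a*a + b*b).  A plain-C
         equivalent (hypothetical helper):

           #include <math.h>

           double cabs_simple (double re, double im)
           {
             return sqrt (re * re + im * im);   // squares may overflow
           }

         Note the naive form can overflow when either part is very large;
         library routines typically rescale the operands to avoid that.  */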
  /* Now try a library call in this mode.  */
  if (this_abs_optab->handlers[(int) mode].libfunc)

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, submode, 1, op0, mode);
      insns = get_insns ();

      target = gen_reg_rtx (submode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (this_abs_optab->code, mode, op0));

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      if ((this_abs_optab->handlers[(int) wider_mode].insn_code
           != CODE_FOR_nothing)
          || this_abs_optab->handlers[(int) wider_mode].libfunc)

          xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
          temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

          if (class != MODE_COMPLEX_INT)
              target = gen_reg_rtx (submode);
              convert_move (target, temp, 0);

            return gen_lowpart (submode, temp);

          delete_insns_since (last);

  delete_insns_since (entry_last);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

emit_unop_insn (icode, target, op0, code)

  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  temp = target = protect_from_queue (target, 1);

  op0 = protect_from_queue (op0, 0);

  /* Sign and zero extension from memory is often done specially on
     RISC machines, so forcing into a register here can pessimize.  */
  if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
    op0 = force_not_mem (op0);

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
      || (flag_force_mem && GET_CODE (temp) == MEM))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (GET_CODE (pat) == SEQUENCE && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

    emit_move_insn (target, temp);
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
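/* Illustrative sketch: for a two-word operation the emitted block has
   roughly this shape, shown here schematically rather than as real RTL
   output:

     (clobber (reg:DI target))
     (set (low word of target) ...)    ;; carries a REG_NO_CONFLICT note
     (set (high word of target) ...)   ;; carries a REG_NO_CONFLICT note
     (set (reg:DI target) (reg:DI target))   ;; final copy, REG_RETVAL here

   The first insn of the block carries the matching REG_LIBCALL note, so
   later passes can treat the whole group as a single unit.  */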
emit_no_conflict_block (insns, target, op0, op1, equiv)

  rtx prev, next, first, last, insn;

  if (GET_CODE (target) != REG || reload_in_progress)
    return emit_insns (insns);

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) != INSN
        || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
      return emit_insns (insns);

  /* First emit all insns that do not store into words of the output and
     remove these from the list.  */
  for (insn = insns; insn; insn = next)

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions on their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
          || GET_CODE (PATTERN (insn)) == CLOBBER)
        set = PATTERN (insn);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
          for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
            if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
                set = XVECEXP (PATTERN (insn), 0, i);

      if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;

            PREV_INSN (next) = PREV_INSN (insn);

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)

      next = NEXT_INSN (insn);

      if (op1 && GET_CODE (op1) == REG)
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,

      if (op0 && GET_CODE (op0) == REG)
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
      last = emit_move_insn (target, target);
        set_unique_reg_note (last, REG_EQUAL, equiv);

      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         alleged libcall value when found together with the REG_RETVAL
         note added below.  An existing note can come from an insn
         expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));

  first = get_insns ();
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

emit_libcall_block (insns, target, result, equiv)

  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (GET_CODE (insn) == CALL_INSN)
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
              remove_note (insn, note);

    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == CALL_INSN)
          rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            XEXP (note, 0) = GEN_INT (-1);
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1),

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
      rtx set = single_set (insn);

      /* Some ports (cris) create libcall regions on their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && GET_CODE (SET_DEST (set)) == REG
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER

              || ((! INSN_P (insns)
                   || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
                  && ! reg_used_between_p (SET_DEST (set), insns, insn)
                  && ! modified_in_p (SET_SRC (set), insns)
                  && ! modified_between_p (SET_SRC (set), insns, insn))))

          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;

            PREV_INSN (next) = PREV_INSN (insn);

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
      next = NEXT_INSN (insn);

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         libcall value when found together with the REG_RETVAL note added
         below.  An existing note can come from an insn expansion at
         "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  first = get_insns ();
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
      REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
/* Generate code to store zero in X.  */

  emit_move_insn (x, const0_rtx);

/* Generate code to store 1 in X
   assuming it contains zero beforehand.  */

emit_0_to_1_insn (x)

  emit_move_insn (x, const1_rtx);

/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

can_compare_p (code, mode, purpose)
     enum machine_mode mode;
     enum can_compare_purpose purpose;

      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;

          /* There's only one cmov entry point, and it's allowed to fail.  */

      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      mode = GET_MODE_WIDER_MODE (mode);
  while (mode != VOIDmode);
/* This function is called when we are going to emit a compare instruction
   that compares the values found in *PX and *PY, using the rtl operator
   COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */

prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, purpose)
     enum rtx_code *pcomparison;
     enum machine_mode *pmode;
     enum can_compare_purpose purpose;

  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* They could both be VOIDmode if both args are immediate constants,
     but we should fold that at an earlier stage.
     With no special code here, this will call abort,
     reminding the programmer to implement such folding.  */

  if (mode != BLKmode && flag_force_mem)
      x = force_not_mem (x);
      y = force_not_mem (y);

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (x) && preserve_subexpressions_p ()
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && preserve_subexpressions_p ()
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Abort if we have a non-canonical comparison.  The RTL documentation
     states that canonical comparisons are required only for targets which  */
  if (CONSTANT_P (x) && ! CONSTANT_P (y))

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
      enum machine_mode result_mode;
      rtx opalign ATTRIBUTE_UNUSED
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      x = protect_from_queue (x, 0);
      y = protect_from_queue (y, 0);

#ifdef HAVE_cmpstrqi
          && GET_CODE (size) == CONST_INT
          && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
          result_mode = insn_data[(int) CODE_FOR_cmpstrqi].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          emit_insn (gen_cmpstrqi (result, x, y, size, opalign));

#ifdef HAVE_cmpstrhi
          && GET_CODE (size) == CONST_INT
          && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
          result_mode = insn_data[(int) CODE_FOR_cmpstrhi].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          emit_insn (gen_cmpstrhi (result, x, y, size, opalign));

#ifdef HAVE_cmpstrsi
          result_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = protect_from_queue (size, 0);
          emit_insn (gen_cmpstrsi (result, x, y,
                                   convert_to_mode (SImode, size, 1),

#ifdef TARGET_MEM_FUNCTIONS
          emit_library_call (memcmp_libfunc, LCT_PURE_MAKE_BLOCK,
                             TYPE_MODE (integer_type_node), 3,
                             XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
                             convert_to_mode (TYPE_MODE (sizetype), size,
                                              TREE_UNSIGNED (sizetype)),
                             TYPE_MODE (sizetype));
          emit_library_call (bcmp_libfunc, LCT_PURE_MAKE_BLOCK,
                             TYPE_MODE (integer_type_node), 3,
                             XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
                             convert_to_mode (TYPE_MODE (integer_type_node),
                                              TREE_UNSIGNED (integer_type_node)),
                             TYPE_MODE (integer_type_node));

      /* Immediately move the result of the libcall into a pseudo
         register so reload doesn't clobber the value if it needs
         the return register for a spill reg.  */
      result = gen_reg_rtx (TYPE_MODE (integer_type_node));
      result_mode = TYPE_MODE (integer_type_node);
      emit_move_insn (result,
                      hard_libcall_value (result_mode));

      *pmode = result_mode;

  if (can_compare_p (*pcomparison, mode, purpose))

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
        libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      emit_library_call (libfunc, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode,

      /* Immediately move the result of the libcall into a pseudo
         register so reload doesn't clobber the value if it needs
         the return register for a spill reg.  */
      result = gen_reg_rtx (word_mode);
      emit_move_insn (result, hard_libcall_value (word_mode));

      /* Integer comparison returns a result that must be compared against 1,
         so that even if we do an unsigned compare afterward,
         there is still a value that can represent the result "less than".  */

  if (class == MODE_FLOAT)
    prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

prepare_operand (icode, x, opnum, mode, wider_mode, unsignedp)
     enum machine_mode mode, wider_mode;

  x = protect_from_queue (x, 0);

  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label)
     enum machine_mode mode;
     enum rtx_code comparison;

  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

          icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

          if (icode != CODE_FOR_nothing
              && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      if (class != MODE_INT && class != MODE_FLOAT
          && class != MODE_COMPLEX_FLOAT)

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    } while (wider_mode != VOIDmode);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, label)
     enum rtx_code comparison;
     enum machine_mode mode;

  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
      /* If we're not emitting a branch, this means some caller  */
      comparison = swap_condition (comparison);

  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,

  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);

/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

emit_cmp_insn (x, y, comparison, size, mode, unsignedp)
     enum rtx_code comparison;
     enum machine_mode mode;

  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
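/* Illustrative sketch: a soft-float comparison library routine
   conventionally returns an integer that is then compared against zero,
   so "a > b" in DFmode becomes, roughly:

     extern int __gtdf2 (double, double);   // libgcc-style helper

     int gt (double a, double b)
     {
       return __gtdf2 (a, b) > 0;   // libcall result, then integer compare
     }

   When a particular routine (say gtdf2) is not provided, the code below
   swaps the operands and uses the opposite routine instead.  */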
prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp)
     enum rtx_code *pcomparison;
     enum machine_mode *pmode;

  enum rtx_code comparison = *pcomparison;
  rtx x = *px = protect_from_queue (*px, 0);
  rtx y = *py = protect_from_queue (*py, 0);
  enum machine_mode mode = GET_MODE (x);

      libfunc = eqhf2_libfunc;
      libfunc = nehf2_libfunc;
      libfunc = gthf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = lthf2_libfunc;
      libfunc = gehf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = lehf2_libfunc;
      libfunc = lthf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gthf2_libfunc;
      libfunc = lehf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gehf2_libfunc;
      libfunc = unordhf2_libfunc;

  else if (mode == SFmode)
      libfunc = eqsf2_libfunc;
      libfunc = nesf2_libfunc;
      libfunc = gtsf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = ltsf2_libfunc;
      libfunc = gesf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = lesf2_libfunc;
      libfunc = ltsf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gtsf2_libfunc;
      libfunc = lesf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gesf2_libfunc;
      libfunc = unordsf2_libfunc;

  else if (mode == DFmode)
      libfunc = eqdf2_libfunc;
      libfunc = nedf2_libfunc;
      libfunc = gtdf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = ltdf2_libfunc;
      libfunc = gedf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = ledf2_libfunc;
      libfunc = ltdf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gtdf2_libfunc;
      libfunc = ledf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gedf2_libfunc;
      libfunc = unorddf2_libfunc;

  else if (mode == XFmode)
      libfunc = eqxf2_libfunc;
      libfunc = nexf2_libfunc;
      libfunc = gtxf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = ltxf2_libfunc;
      libfunc = gexf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = lexf2_libfunc;
      libfunc = ltxf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gtxf2_libfunc;
      libfunc = lexf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gexf2_libfunc;
      libfunc = unordxf2_libfunc;

  else if (mode == TFmode)
      libfunc = eqtf2_libfunc;
      libfunc = netf2_libfunc;
      libfunc = gttf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = lttf2_libfunc;
      libfunc = getf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = letf2_libfunc;
      libfunc = lttf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = gttf2_libfunc;
      libfunc = letf2_libfunc;
      if (libfunc == NULL_RTX)
          tmp = x; x = y; y = tmp;
          libfunc = getf2_libfunc;
      libfunc = unordtf2_libfunc;

      enum machine_mode wider_mode;

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if ((cmp_optab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (cmp_optab->handlers[(int) wider_mode].libfunc != 0))
              x = protect_from_queue (x, 0);
              y = protect_from_queue (y, 0);
              *px = convert_to_mode (wider_mode, x, 0);
              *py = convert_to_mode (wider_mode, y, 0);
              prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);

  emit_library_call (libfunc, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode, y,

  /* Immediately move the result of the libcall into a pseudo
     register so reload doesn't clobber the value if it needs
     the return register for a spill reg.  */
  result = gen_reg_rtx (word_mode);
  emit_move_insn (result, hard_libcall_value (word_mode));

  if (comparison == UNORDERED)

#ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
  else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

emit_indirect_jump (loc)

  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
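/* Illustrative sketch: at the source level the emitted conditional move
   computes the equivalent of

     target = (op0 CODE op1) ? op2 : op3;

   but without a branch, using the target machine's conditional-move
   pattern for MODE when one exists.  */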
emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode,
     enum machine_mode cmode;
     enum machine_mode mode;

  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
      code = swap_condition (code);

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == 1)
    code = LE, op1 = const0_rtx;
  else if (code == GT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == -1)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)

      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);

    target = protect_from_queue (target, 1);
    target = gen_reg_rtx (mode);

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this  */
  if (GET_CODE (comparison) != code)

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  if (subtarget != target)
    convert_move (target, subtarget, 0);

/* Return non-zero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

can_conditionally_move_p (mode)
     enum machine_mode mode;

  if (movcc_gen_code[mode] != CODE_FOR_nothing)

#endif /* HAVE_conditional_move */
/* These functions generate an insn body and return it
   rather than emitting the insn.

   They do not protect from queued increments,
   because they may be used 1) in protect_from_queue itself
   and 2) in other passes where there is no queue.  */

/* Generate and return an insn body to add Y to X.  */

gen_add2_insn (x, y)

  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))

  return (GEN_FCN (icode) (x, x, y));

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

gen_add3_insn (r0, r1, c)

  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))

  return (GEN_FCN (icode) (r0, r1, c));

have_add2_insn (x, y)

  if (GET_MODE (x) == VOIDmode)

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))

/* Generate and return an insn body to subtract Y from X.  */

gen_sub2_insn (x, y)

  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))

  return (GEN_FCN (icode) (x, x, y));

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

gen_sub3_insn (r0, r1, c)

  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))

  return (GEN_FCN (icode) (r0, r1, c));

have_sub2_insn (x, y)

  if (GET_MODE (x) == VOIDmode)

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
/* Generate the body of an instruction to copy Y into X.
   It may be a SEQUENCE, if one insn isn't enough.  */

gen_move_insn (x, y)

  enum machine_mode mode = GET_MODE (x);
  enum insn_code insn_code;

  if (mode == VOIDmode)
    mode = GET_MODE (y);

  insn_code = mov_optab->handlers[(int) mode].insn_code;

  /* Handle MODE_CC modes:  If we don't have a special move insn for this
     mode, find a mode to do it in.  If we have a movcc, use it.  Otherwise,
     find the MODE_INT mode of the same width.  */

  if (GET_MODE_CLASS (mode) == MODE_CC && insn_code == CODE_FOR_nothing)
      enum machine_mode tmode = VOIDmode;

          && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)

        for (tmode = QImode; tmode != VOIDmode;
             tmode = GET_MODE_WIDER_MODE (tmode))
          if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))

      if (tmode == VOIDmode)

      /* Get X and Y in TMODE.  We can't use gen_lowpart here because it
         may call change_address which is not appropriate if we were
         called when a reload was in progress.  We don't have to worry
         about changing the address since the size in bytes is supposed to
         be the same.  Copy the MEM to change the mode and move any
         substitutions from the old MEM to the new one.  */

      if (reload_in_progress)
          x = gen_lowpart_common (tmode, x1);
          if (x == 0 && GET_CODE (x1) == MEM)
              x = adjust_address_nv (x1, tmode, 0);
              copy_replacements (x1, x);

          y = gen_lowpart_common (tmode, y1);
          if (y == 0 && GET_CODE (y1) == MEM)
              y = adjust_address_nv (y1, tmode, 0);
              copy_replacements (y1, y);

          x = gen_lowpart (tmode, x);
          y = gen_lowpart (tmode, y);

      insn_code = mov_optab->handlers[(int) tmode].insn_code;
      return (GEN_FCN (insn_code) (x, y));

  emit_move_insn_1 (x, y);
  seq = gen_sequence ();
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

can_extend_p (to_mode, from_mode, unsignedp)
     enum machine_mode to_mode, from_mode;

#ifdef HAVE_ptr_extend
    return CODE_FOR_ptr_extend;

  return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

gen_extend_insn (x, y, mto, mfrom, unsignedp)
     enum machine_mode mto, mfrom;

  return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));

/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr)
     enum machine_mode fltmode, fixmode;

  if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
      != CODE_FOR_nothing)
    return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];

  if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
      return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];

  return CODE_FOR_nothing;

static enum insn_code
can_float_p (fltmode, fixmode, unsignedp)
     enum machine_mode fixmode, fltmode;

  return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

expand_float (to, from, unsignedp)

  enum insn_code icode;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (imode = GET_MODE (from); imode != VOIDmode;
       imode = GET_MODE_WIDER_MODE (imode))
    for (fmode = GET_MODE (to); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
          icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

              convert_move (to, target, 0);

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */

      rtx label = gen_label_rtx ();
      REAL_VALUE_TYPE offset;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

        from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)

      if (fmode == VOIDmode)
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (GET_CODE (target) != REG
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
                emit_move_insn (target, temp);
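              /* Illustrative sketch: the halve-then-double trick above,
                 written in plain C for converting a 64-bit unsigned value
                 to double when only a signed conversion is available:

                   double u64_to_double (unsigned long long u)
                   {
                     if ((long long) u >= 0)
                       return (double) (long long) u;   // sign bit clear
                     // Sign bit set: halve, keeping the low bit sticky so
                     // rounding still sees it, then double the result.
                     unsigned long long half = (u >> 1) | (u & 1);
                     return (double) (long long) half * 2.0;
                   }

                 This mirrors the emitted sequence: AND, shift, IOR, signed
                 conversion, then an add of the result to itself.  */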
4362 do_pending_stack_adjust ();
      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       0, label);

      /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
	 Rather than setting up a dconst_dot_5, let's hope SCO
	 fixes the bug.  */
      offset = REAL_VALUE_LDEXP (dconst1, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
			   CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);

      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }
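
  /* Illustrative example (assumes a 32-bit FROM; not from the original
     comments): if FROM holds 0xFFFFFFF0, the signed conversion above
     produces -16.0; the branch is not taken, so 2**32 = 4294967296.0 is
     added, giving 4294967280.0, the value of FROM read as unsigned.  */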
  /* No hardware instruction available; call a library routine to convert from
     SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode.  */
    {
      rtx libfcn;
      rtx insns;
      rtx value;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      if (flag_force_mem)
	from = force_not_mem (from);

      if (GET_MODE (to) == SFmode)
	{
	  if (GET_MODE (from) == SImode)
	    libfcn = floatsisf_libfunc;
	  else if (GET_MODE (from) == DImode)
	    libfcn = floatdisf_libfunc;
	  else if (GET_MODE (from) == TImode)
	    libfcn = floattisf_libfunc;
	  else
	    abort ();
	}
      else if (GET_MODE (to) == DFmode)
	{
	  if (GET_MODE (from) == SImode)
	    libfcn = floatsidf_libfunc;
	  else if (GET_MODE (from) == DImode)
	    libfcn = floatdidf_libfunc;
	  else if (GET_MODE (from) == TImode)
	    libfcn = floattidf_libfunc;
	  else
	    abort ();
	}
      else if (GET_MODE (to) == XFmode)
	{
	  if (GET_MODE (from) == SImode)
	    libfcn = floatsixf_libfunc;
	  else if (GET_MODE (from) == DImode)
	    libfcn = floatdixf_libfunc;
	  else if (GET_MODE (from) == TImode)
	    libfcn = floattixf_libfunc;
	  else
	    abort ();
	}
      else if (GET_MODE (to) == TFmode)
	{
	  if (GET_MODE (from) == SImode)
	    libfcn = floatsitf_libfunc;
	  else if (GET_MODE (from) == DImode)
	    libfcn = floatditf_libfunc;
	  else if (GET_MODE (from) == TImode)
	    libfcn = floattitf_libfunc;
	  else
	    abort ();
	}
      else
	abort ();

      start_sequence ();

      value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
/* expand_fix: generate code to convert FROM to fixed point
   and store in TO.  FROM must be floating point.  */

static rtx
ftruncify (x)
     rtx x;
{
  rtx temp = gen_reg_rtx (GET_MODE (x));
  return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
}

void
expand_fix (to, from, unsignedp)
     rtx to, from;
     int unsignedp;
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;
  rtx libfcn = (rtx) 0;
  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    to = protect_from_queue (to, 1);
	    from = protect_from_queue (from, 0);

	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      from = ftruncify (from);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FIX : FIX);
	    if (target != to)
	      convert_move (to, target, unsignedp);
	    return;
	  }
      }
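
  /* Illustrative example (hypothetical target, not from this file): with
     only a fix_truncdfsi2 pattern available, fixing a DFmode value into an
     HImode destination is open-coded by taking IMODE = SImode above; the
     SImode result is then narrowed to HImode by convert_move.  Because the
     intermediate mode is wider than TO, the signed pattern also serves for
     an unsigned fix, which is why DOING_UNSIGNED may be cleared.  */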
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      /* Make sure we won't lose significant bits doing this.  */
      if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
	  && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
					    &must_trunc))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit, lab1, lab2, insn;

	  bitsize = GET_MODE_BITSIZE (GET_MODE (to));
	  offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  emit_queue ();
	  to = protect_from_queue (to, 1);
	  from = protect_from_queue (from, 0);

	  if (flag_force_mem)
	    from = force_not_mem (from);

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_unique_reg_note (insn,
				   REG_EQUAL,
				   gen_rtx_fmt_e (UNSIGNED_FIX,
						  GET_MODE (to),
						  copy_rtx (from)));
	    }

	  return;
	}
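
  /* Worked example (illustrative only; assumes 32-bit SImode): to fix
     3500000000.0 to unsigned SImode, the value is >= 2**31, so the code
     above computes fix (3500000000.0 - 2147483648.0) = 1352516352 and then
     XORs in 0x80000000, giving 3500000000, the desired unsigned result.  */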
  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else if (GET_MODE (from) == SFmode)
    {
      if (GET_MODE (to) == SImode)
	libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
      else if (GET_MODE (to) == DImode)
	libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
      else if (GET_MODE (to) == TImode)
	libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
      else
	abort ();
    }
  else if (GET_MODE (from) == DFmode)
    {
      if (GET_MODE (to) == SImode)
	libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
      else if (GET_MODE (to) == DImode)
	libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
      else if (GET_MODE (to) == TImode)
	libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
      else
	abort ();
    }
  else if (GET_MODE (from) == XFmode)
    {
      if (GET_MODE (to) == SImode)
	libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
      else if (GET_MODE (to) == DImode)
	libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
      else if (GET_MODE (to) == TImode)
	libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
      else
	abort ();
    }
  else if (GET_MODE (from) == TFmode)
    {
      if (GET_MODE (to) == SImode)
	libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
      else if (GET_MODE (to) == DImode)
	libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
      else if (GET_MODE (to) == TImode)
	libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
      else
	abort ();
    }
  else
    abort ();

  if (libfcn)
    {
      rtx insns;
      rtx value;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (flag_force_mem)
	from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (code, mode)
     enum rtx_code code;
     enum machine_mode mode;
{
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
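
/* For instance (illustrative, not part of the original comments),
   have_insn_for (PLUS, SImode) is nonzero exactly when the target's machine
   description provides an addsi3 pattern, since that pattern's insn code is
   what init_all_optabs records in add_optab->handlers[(int) SImode].  */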
/* Create a blank optab.  */

static optab
new_optab ()
{
  int i;
  optab op = (optab) xmalloc (sizeof (struct optab));

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lower case) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optable, first_mode, last_mode, opname, suffix)
     optab optable;
     int first_mode;
     int last_mode;
     const char *opname;
     int suffix;
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
	*p++ = *q++;
      for (q = mname; *q; q++)
	*p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
	= gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (libfunc_name,
						       p - libfunc_name));
    }
}
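
/* For example (illustrative): on a target whose floating modes are SFmode,
   DFmode, XFmode and TFmode, the call

	init_libfuncs (add_optab, SFmode, TFmode, "add", '3');

   fills in the libfunc fields "__addsf3", "__adddf3", "__addxf3" and
   "__addtf3", which is what init_floating_libfuncs below arranges for the
   addition optab.  */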
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  init_libfuncs (optable, SImode, TImode, opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  init_libfuncs (optable, SFmode, TFmode, opname, suffix);
}
rtx
init_one_libfunc (name)
     const char *name;
{
  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Return the symbol_ref from the mem rtx.  */
  return XEXP (DECL_RTL (decl), 0);
}
/* Mark ARG (which is really an OPTAB *) for GC.  */

static void
mark_optab (arg)
     void *arg;
{
  optab o = *(optab *) arg;
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; ++i)
    ggc_mark_rtx (o->handlers[i].libfunc);
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs ()
{
  unsigned int i, j, k;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < ARRAY_SIZE (fixtab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
	fixtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
	fixtrunctab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (floattab); i++)
    for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
	floattab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (extendtab); i++)
    for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
	extendtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  sqrt_optab = init_optab (SQRT);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
  /* This flag says the same insns that convert to a signed fixnum
     also convert validly to an unsigned one.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
#endif
  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');
#ifdef MULSI3_LIBCALL
  smul_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MULSI3_LIBCALL);
#endif
#ifdef MULDI3_LIBCALL
  smul_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MULDI3_LIBCALL);
#endif

#ifdef DIVSI3_LIBCALL
  sdiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (DIVSI3_LIBCALL);
#endif
#ifdef DIVDI3_LIBCALL
  sdiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (DIVDI3_LIBCALL);
#endif

#ifdef UDIVSI3_LIBCALL
  udiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UDIVSI3_LIBCALL);
#endif
#ifdef UDIVDI3_LIBCALL
  udiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UDIVDI3_LIBCALL);
#endif

#ifdef MODSI3_LIBCALL
  smod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MODSI3_LIBCALL);
#endif
#ifdef MODDI3_LIBCALL
  smod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MODDI3_LIBCALL);
#endif

#ifdef UMODSI3_LIBCALL
  umod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UMODSI3_LIBCALL);
#endif
#ifdef UMODDI3_LIBCALL
  umod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UMODDI3_LIBCALL);
#endif
  /* Use cabs for DC complex abs, since systems generally have cabs.
     Don't define any libcall for SCmode, so that cabs will be used.  */
  abs_optab->handlers[(int) DCmode].libfunc
    = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
  extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
  extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
  extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
  extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");

  truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
  truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
  trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
  truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
  trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
					    ? "_Unwind_SjLj_Resume"
					    : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");
  eqhf2_libfunc = init_one_libfunc ("__eqhf2");
  nehf2_libfunc = init_one_libfunc ("__nehf2");
  gthf2_libfunc = init_one_libfunc ("__gthf2");
  gehf2_libfunc = init_one_libfunc ("__gehf2");
  lthf2_libfunc = init_one_libfunc ("__lthf2");
  lehf2_libfunc = init_one_libfunc ("__lehf2");
  unordhf2_libfunc = init_one_libfunc ("__unordhf2");

  eqsf2_libfunc = init_one_libfunc ("__eqsf2");
  nesf2_libfunc = init_one_libfunc ("__nesf2");
  gtsf2_libfunc = init_one_libfunc ("__gtsf2");
  gesf2_libfunc = init_one_libfunc ("__gesf2");
  ltsf2_libfunc = init_one_libfunc ("__ltsf2");
  lesf2_libfunc = init_one_libfunc ("__lesf2");
  unordsf2_libfunc = init_one_libfunc ("__unordsf2");

  eqdf2_libfunc = init_one_libfunc ("__eqdf2");
  nedf2_libfunc = init_one_libfunc ("__nedf2");
  gtdf2_libfunc = init_one_libfunc ("__gtdf2");
  gedf2_libfunc = init_one_libfunc ("__gedf2");
  ltdf2_libfunc = init_one_libfunc ("__ltdf2");
  ledf2_libfunc = init_one_libfunc ("__ledf2");
  unorddf2_libfunc = init_one_libfunc ("__unorddf2");

  eqxf2_libfunc = init_one_libfunc ("__eqxf2");
  nexf2_libfunc = init_one_libfunc ("__nexf2");
  gtxf2_libfunc = init_one_libfunc ("__gtxf2");
  gexf2_libfunc = init_one_libfunc ("__gexf2");
  ltxf2_libfunc = init_one_libfunc ("__ltxf2");
  lexf2_libfunc = init_one_libfunc ("__lexf2");
  unordxf2_libfunc = init_one_libfunc ("__unordxf2");

  eqtf2_libfunc = init_one_libfunc ("__eqtf2");
  netf2_libfunc = init_one_libfunc ("__netf2");
  gttf2_libfunc = init_one_libfunc ("__gttf2");
  getf2_libfunc = init_one_libfunc ("__getf2");
  lttf2_libfunc = init_one_libfunc ("__lttf2");
  letf2_libfunc = init_one_libfunc ("__letf2");
  unordtf2_libfunc = init_one_libfunc ("__unordtf2");

  floatsisf_libfunc = init_one_libfunc ("__floatsisf");
  floatdisf_libfunc = init_one_libfunc ("__floatdisf");
  floattisf_libfunc = init_one_libfunc ("__floattisf");

  floatsidf_libfunc = init_one_libfunc ("__floatsidf");
  floatdidf_libfunc = init_one_libfunc ("__floatdidf");
  floattidf_libfunc = init_one_libfunc ("__floattidf");

  floatsixf_libfunc = init_one_libfunc ("__floatsixf");
  floatdixf_libfunc = init_one_libfunc ("__floatdixf");
  floattixf_libfunc = init_one_libfunc ("__floattixf");

  floatsitf_libfunc = init_one_libfunc ("__floatsitf");
  floatditf_libfunc = init_one_libfunc ("__floatditf");
  floattitf_libfunc = init_one_libfunc ("__floattitf");

  fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
  fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
  fixsfti_libfunc = init_one_libfunc ("__fixsfti");

  fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
  fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
  fixdfti_libfunc = init_one_libfunc ("__fixdfti");

  fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
  fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
  fixxfti_libfunc = init_one_libfunc ("__fixxfti");

  fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
  fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
  fixtfti_libfunc = init_one_libfunc ("__fixtfti");

  fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
  fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
  fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");

  fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
  fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
  fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");

  fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
  fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
  fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");

  fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
  fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
  fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");
#ifdef HAVE_conditional_trap
  init_traps ();
#endif

#ifdef INIT_TARGET_OPTABS
  /* Allow the target to add more libcalls or rename some, etc.  */
  INIT_TARGET_OPTABS;
#endif

  /* Add these GC roots.  */
  ggc_add_root (optab_table, OTI_MAX, sizeof (optab), mark_optab);
  ggc_add_rtx_root (libfunc_table, LTI_MAX);
}
#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are
   ignored.  */
static rtx trap_rtx;

static void
init_traps ()
{
  if (HAVE_conditional_trap)
    {
      trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
      ggc_add_rtx_root (&trap_rtx, 1);
    }
}
#endif
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (code, op1, op2, tcode)
     enum rtx_code code ATTRIBUTE_UNUSED;
     rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED;
{
  enum machine_mode mode = GET_MODE (op1);

  if (mode == VOIDmode)
    return 0;

#ifdef HAVE_conditional_trap
  if (HAVE_conditional_trap
      && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx insn;

      start_sequence ();
      emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code)
		 (op1, op2));
      PUT_CODE (trap_rtx, code);
      insn = gen_conditional_trap (trap_rtx, tcode);
      if (insn)
	{
	  emit_insn (insn);
	  insn = gen_sequence ();