/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
27 /* Include insn-config.h before expr.h so that HAVE_conditional_move
28 is properly defined. */
29 #include "insn-config.h"
/* NOTE(review): this file is a line-mangled extraction of GCC's optabs.c.
   The leading integers fused onto many lines are remnants of the original
   line numbers, and some original lines are missing entirely.  Only
   comments are added below; no other tokens were changed.  */
44 /* Each optab contains info on how this target machine
45 can perform a particular operation
46 for all sizes and kinds of operands.
48 The operation to be performed is often specified
49 by passing one of these optabs as an argument.
51 See expr.h for documentation of these optabs. */
53 optab optab_table
[OTI_MAX
];
/* Library routines used when the hardware has no pattern for an
   operation; indexed by libfunc index.  */
55 rtx libfunc_table
[LTI_MAX
];
57 /* Tables of patterns for extending one integer mode to another. */
58 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
60 /* Tables of patterns for converting between fixed and floating point. */
61 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
/* Like fixtab, but for float->fix conversions that truncate.  */
62 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
/* Like fixtab, but for fix->float conversions.  */
63 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab
[NUM_RTX_CODE
+ 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* NOTE(review): the matching #endif for HAVE_conditional_move is not
   visible in this extraction -- confirm it survives in the full file.  */
/* Forward declarations for the static helpers defined later in this file.
   NOTE(review): several PARAMS lists below are visibly truncated by the
   extraction (e.g. can_fix_p, can_float_p, complex_part_zero_p,
   init_traps' #endif, expand_vector_binop/unop) -- restore from the
   pristine optabs.c before compiling.  */
88 static int add_equal_note
PARAMS ((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
89 static rtx widen_operand
PARAMS ((rtx
, enum machine_mode
,
90 enum machine_mode
, int, int));
91 static int expand_cmplxdiv_straight
PARAMS ((rtx
, rtx
, rtx
, rtx
,
92 rtx
, rtx
, enum machine_mode
,
93 int, enum optab_methods
,
94 enum mode_class
, optab
));
95 static int expand_cmplxdiv_wide
PARAMS ((rtx
, rtx
, rtx
, rtx
,
96 rtx
, rtx
, enum machine_mode
,
97 int, enum optab_methods
,
98 enum mode_class
, optab
));
99 static void prepare_cmp_insn
PARAMS ((rtx
*, rtx
*, enum rtx_code
*, rtx
,
100 enum machine_mode
*, int *,
101 enum can_compare_purpose
));
/* NOTE(review): the two declarations below lost their closing
   argument lists in the extraction.  */
102 static enum insn_code can_fix_p
PARAMS ((enum machine_mode
, enum machine_mode
,
104 static enum insn_code can_float_p
PARAMS ((enum machine_mode
,
107 static rtx ftruncify
PARAMS ((rtx
));
108 static optab new_optab
PARAMS ((void));
109 static inline optab init_optab
PARAMS ((enum rtx_code
));
110 static inline optab init_optabv
PARAMS ((enum rtx_code
));
111 static inline int complex_part_zero_p
PARAMS ((rtx
, enum mode_class
,
113 static void init_libfuncs
PARAMS ((optab
, int, int, const char *, int));
114 static void init_integral_libfuncs
PARAMS ((optab
, const char *, int));
115 static void init_floating_libfuncs
PARAMS ((optab
, const char *, int));
116 #ifdef HAVE_conditional_trap
117 static void init_traps
PARAMS ((void));
119 static void emit_cmp_and_jump_insn_1
PARAMS ((rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
));
121 static void prepare_float_lib_cmp
PARAMS ((rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *));
123 static rtx expand_vector_binop
PARAMS ((enum machine_mode
, optab
,
125 enum optab_methods
));
126 static rtx expand_vector_unop
PARAMS ((enum machine_mode
, optab
, rtx
, rtx
,
129 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
130 the result of operation CODE applied to OP0 (and OP1 if it is a binary
133 If the last insn does not set TARGET, don't do anything, but return 1.
135 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
136 don't add the REG_EQUAL note but return 0. Our caller can then try
137 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): the body below is incomplete in this extraction --
   parameter declarations, braces and the return statements are missing.
   Comments only added; no tokens changed.  */
140 add_equal_note (insns
, target
, code
, op0
, op1
)
146 rtx last_insn
, insn
, set
;
/* A one-insn sequence never needs (or can usefully carry) the note.  */
151 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only unary ('1'), binary ('2'), commutative ('c') and comparison
   ('<') codes can be expressed as a REG_EQUAL note.  */
154 if (GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
155 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
158 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Find the last insn of the sequence.  */
161 for (last_insn
= insns
;
162 NEXT_INSN (last_insn
) != NULL_RTX
;
163 last_insn
= NEXT_INSN (last_insn
))
166 set
= single_set (last_insn
);
170 if (! rtx_equal_p (SET_DEST (set
), target
)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
173 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
174 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
178 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
179 besides the last insn. */
180 if (reg_overlap_mentioned_p (target
, op0
)
181 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
183 insn
= PREV_INSN (last_insn
);
184 while (insn
!= NULL_RTX
)
186 if (reg_set_p (target
, insn
))
189 insn
= PREV_INSN (insn
);
/* Build the note: unary codes take one operand, all others two.  */
193 if (GET_RTX_CLASS (code
) == '1')
194 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
196 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
198 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
203 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
204 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
205 not actually do a sign-extend or zero-extend, but can leave the
206 higher-order bits of the result rtx undefined, for example, in the case
207 of logical operations, but not right shifts. */
/* NOTE(review): body incomplete in this extraction (declarations,
   braces, trailing return are missing).  Comments only added.  */
210 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
212 enum machine_mode mode
, oldmode
;
218 /* If we don't have to extend and this is a constant, return it. */
219 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
222 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
223 extend since it will be more efficient to do so unless the signedness of
224 a promoted object differs from our extension. */
226 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
227 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
228 return convert_modes (mode
, oldmode
, op
, unsignedp
);
230 /* If MODE is no wider than a single word, we return a paradoxical
232 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
233 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
235 /* Otherwise, get an object of MODE, clobber it, and set the low-order
238 result
= gen_reg_rtx (mode
);
239 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
240 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
244 /* Test whether either the real or imaginary part of a complex floating
245 point number is 0.0, so that it can be ignored (when compiling
246 with -funsafe-math-optimizations). */
/* NOTE(review): the `rtx part;' declaration, opening brace and `return'
   keyword were lost in this extraction; the visible expression is the
   whole body.  Comments only added.  */
249 complex_part_zero_p (part
, class, submode
)
251 enum mode_class
class;
252 enum machine_mode submode
;
/* Nonzero only under -funsafe-math-optimizations, for complex float
   modes, when PART compares equal to the zero constant of SUBMODE.  */
255 (flag_unsafe_math_optimizations
256 && class == MODE_COMPLEX_FLOAT
257 && part
== CONST0_RTX (submode
));
260 /* Generate code to perform a straightforward complex divide. */
/* Divides (a+ib) by (c+id) as ((a+ib)(c-id)) / (c*c+d*d), emitting the
   real part into REALR and the imaginary part into IMAGR.
   NOTE(review): this extraction dropped the local declarations, braces,
   and the `return 0;'/`return 1;' failure/success paths visible in the
   pristine optabs.c.  Comments only added; no tokens changed.  */
263 expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
264 unsignedp
, methods
, class, binoptab
)
265 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
266 enum machine_mode submode
;
268 enum optab_methods methods
;
269 enum mode_class
class;
276 optab this_add_optab
= add_optab
;
277 optab this_sub_optab
= sub_optab
;
278 optab this_neg_optab
= neg_optab
;
279 optab this_mul_optab
= smul_optab
;
/* For sdivv_optab switch to the trapping ("v") optab variants --
   presumably for overflow-trapping arithmetic; confirm against the
   full file.  */
281 if (binoptab
== sdivv_optab
)
283 this_add_optab
= addv_optab
;
284 this_sub_optab
= subv_optab
;
285 this_neg_optab
= negv_optab
;
286 this_mul_optab
= smulv_optab
;
289 /* Don't fetch these from memory more than once. */
290 real0
= force_reg (submode
, real0
);
291 real1
= force_reg (submode
, real1
);
294 imag0
= force_reg (submode
, imag0
);
296 imag1
= force_reg (submode
, imag1
);
298 /* Divisor: c*c + d*d. */
299 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
300 NULL_RTX
, unsignedp
, methods
);
302 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
303 NULL_RTX
, unsignedp
, methods
);
/* Any expand_* helper may fail (return 0); bail out if so.  */
305 if (temp1
== 0 || temp2
== 0)
308 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
309 NULL_RTX
, unsignedp
, methods
);
313 if (complex_part_zero_p (imag0
, class, submode
))
315 /* Mathematically, ((a)(c-id))/divisor. */
316 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
318 /* Calculate the dividend. */
319 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
320 NULL_RTX
, unsignedp
, methods
);
322 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
323 NULL_RTX
, unsignedp
, methods
);
325 if (real_t
== 0 || imag_t
== 0)
328 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
329 NULL_RTX
, unsignedp
);
333 /* Mathematically, ((a+ib)(c-id))/divider. */
334 /* Calculate the dividend. */
335 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
336 NULL_RTX
, unsignedp
, methods
);
338 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
339 NULL_RTX
, unsignedp
, methods
);
341 if (temp1
== 0 || temp2
== 0)
344 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
345 NULL_RTX
, unsignedp
, methods
);
347 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
348 NULL_RTX
, unsignedp
, methods
);
350 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
351 NULL_RTX
, unsignedp
, methods
);
353 if (temp1
== 0 || temp2
== 0)
356 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
357 NULL_RTX
, unsignedp
, methods
);
359 if (real_t
== 0 || imag_t
== 0)
/* Final division of each part by the common divisor: real division for
   complex float, integer TRUNC_DIV for everything else.  */
363 if (class == MODE_COMPLEX_FLOAT
)
364 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
365 realr
, unsignedp
, methods
);
367 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
368 real_t
, divisor
, realr
, unsignedp
);
374 emit_move_insn (realr
, res
);
376 if (class == MODE_COMPLEX_FLOAT
)
377 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
378 imagr
, unsignedp
, methods
);
380 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
381 imag_t
, divisor
, imagr
, unsignedp
);
387 emit_move_insn (imagr
, res
);
392 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Smith's algorithm: compare |c| and |d| at run time and scale by the
   ratio of the smaller to the larger, avoiding overflow in c*c+d*d.
   Emits a two-armed branch (lab1/lab2).
   NOTE(review): local declarations, braces, the emit_label calls for
   lab1/lab2, and the return statements were dropped by this extraction.
   Comments only added; no tokens changed.  */
395 expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
396 unsignedp
, methods
, class, binoptab
)
397 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
398 enum machine_mode submode
;
400 enum optab_methods methods
;
401 enum mode_class
class;
406 rtx temp1
, temp2
, lab1
, lab2
;
407 enum machine_mode mode
;
409 optab this_add_optab
= add_optab
;
410 optab this_sub_optab
= sub_optab
;
411 optab this_neg_optab
= neg_optab
;
412 optab this_mul_optab
= smul_optab
;
/* Trapping ("v") variants for sdivv_optab, as in
   expand_cmplxdiv_straight.  */
414 if (binoptab
== sdivv_optab
)
416 this_add_optab
= addv_optab
;
417 this_sub_optab
= subv_optab
;
418 this_neg_optab
= negv_optab
;
419 this_mul_optab
= smulv_optab
;
422 /* Don't fetch these from memory more than once. */
423 real0
= force_reg (submode
, real0
);
424 real1
= force_reg (submode
, real1
);
427 imag0
= force_reg (submode
, imag0
);
429 imag1
= force_reg (submode
, imag1
);
431 /* XXX What's an "unsigned" complex number? */
/* Compare |c| against |d| to pick the scaling that cannot overflow.  */
439 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
440 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
443 if (temp1
== 0 || temp2
== 0)
446 mode
= GET_MODE (temp1
);
447 lab1
= gen_label_rtx ();
448 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
449 mode
, unsignedp
, lab1
);
451 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
453 if (class == MODE_COMPLEX_FLOAT
)
454 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
455 NULL_RTX
, unsignedp
, methods
);
457 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
458 imag1
, real1
, NULL_RTX
, unsignedp
);
463 /* Calculate divisor. */
465 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
466 NULL_RTX
, unsignedp
, methods
);
471 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
472 NULL_RTX
, unsignedp
, methods
);
477 /* Calculate dividend. */
479 if (complex_part_zero_p (imag0
, class, submode
))
483 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
485 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
486 NULL_RTX
, unsignedp
, methods
);
491 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
492 NULL_RTX
, unsignedp
);
494 if (real_t
== 0 || imag_t
== 0)
499 /* Compute (a+ib)/(c+id) as
500 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
502 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
503 NULL_RTX
, unsignedp
, methods
);
508 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
509 NULL_RTX
, unsignedp
, methods
);
511 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
512 NULL_RTX
, unsignedp
, methods
);
517 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
518 NULL_RTX
, unsignedp
, methods
);
520 if (real_t
== 0 || imag_t
== 0)
/* Divide each scaled part by the scaled divisor and store the
   results (first arm, |c| >= |d|).  */
524 if (class == MODE_COMPLEX_FLOAT
)
525 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
526 realr
, unsignedp
, methods
);
528 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
529 real_t
, divisor
, realr
, unsignedp
);
535 emit_move_insn (realr
, res
);
537 if (class == MODE_COMPLEX_FLOAT
)
538 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
539 imagr
, unsignedp
, methods
);
541 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
542 imag_t
, divisor
, imagr
, unsignedp
);
548 emit_move_insn (imagr
, res
);
/* Skip over the second arm; its emit_label (lab1) is among the lines
   lost in this extraction.  */
550 lab2
= gen_label_rtx ();
551 emit_jump_insn (gen_jump (lab2
));
556 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
558 if (class == MODE_COMPLEX_FLOAT
)
559 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
560 NULL_RTX
, unsignedp
, methods
);
562 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
563 real1
, imag1
, NULL_RTX
, unsignedp
);
568 /* Calculate divisor. */
570 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
571 NULL_RTX
, unsignedp
, methods
);
576 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
577 NULL_RTX
, unsignedp
, methods
);
582 /* Calculate dividend. */
584 if (complex_part_zero_p (imag0
, class, submode
))
586 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
588 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
589 NULL_RTX
, unsignedp
, methods
);
591 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
592 NULL_RTX
, unsignedp
);
594 if (real_t
== 0 || imag_t
== 0)
599 /* Compute (a+ib)/(c+id) as
600 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
602 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
603 NULL_RTX
, unsignedp
, methods
);
608 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
609 NULL_RTX
, unsignedp
, methods
);
611 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
612 NULL_RTX
, unsignedp
, methods
);
617 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
618 NULL_RTX
, unsignedp
, methods
);
620 if (real_t
== 0 || imag_t
== 0)
/* Divide and store the results (second arm, |d| > |c|).  The trailing
   emit_label (lab2) was also lost in the extraction.  */
624 if (class == MODE_COMPLEX_FLOAT
)
625 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
626 realr
, unsignedp
, methods
);
628 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
629 real_t
, divisor
, realr
, unsignedp
);
635 emit_move_insn (realr
, res
);
637 if (class == MODE_COMPLEX_FLOAT
)
638 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
639 imagr
, unsignedp
, methods
);
641 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
642 imag_t
, divisor
, imagr
, unsignedp
);
648 emit_move_insn (imagr
, res
);
655 /* Wrapper around expand_binop which takes an rtx code to specify
656 the operation to perform, not an optab pointer. All other
657 arguments are the same. */
/* NOTE(review): parameter declarations and braces are missing from this
   extraction.  Comments only added.  */
659 expand_simple_binop (mode
, code
, op0
, op1
, target
, unsignedp
, methods
)
660 enum machine_mode mode
;
665 enum optab_methods methods
;
/* Map the rtx code to its optab via the global code_to_optab table.  */
667 optab binop
= code_to_optab
[(int) code
];
671 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
674 /* Generate code to perform an operation specified by BINOPTAB
675 on operands OP0 and OP1, with result having machine-mode MODE.
677 UNSIGNEDP is for the case where we have to widen the operands
678 to perform the operation. It says to use zero-extension.
680 If TARGET is nonzero, the value
681 is generated there, if it is convenient to do so.
682 In all cases an rtx is returned for the locus of the value;
683 this may or may not be TARGET. */
686 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
687 enum machine_mode mode
;
692 enum optab_methods methods
;
694 enum optab_methods next_methods
695 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
696 ? OPTAB_WIDEN
: methods
);
697 enum mode_class
class;
698 enum machine_mode wider_mode
;
700 int commutative_op
= 0;
701 int shift_op
= (binoptab
->code
== ASHIFT
702 || binoptab
->code
== ASHIFTRT
703 || binoptab
->code
== LSHIFTRT
704 || binoptab
->code
== ROTATE
705 || binoptab
->code
== ROTATERT
);
706 rtx entry_last
= get_last_insn ();
709 class = GET_MODE_CLASS (mode
);
711 op0
= protect_from_queue (op0
, 0);
712 op1
= protect_from_queue (op1
, 0);
714 target
= protect_from_queue (target
, 1);
718 op0
= force_not_mem (op0
);
719 op1
= force_not_mem (op1
);
722 /* If subtracting an integer constant, convert this into an addition of
723 the negated constant. */
725 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
727 op1
= negate_rtx (mode
, op1
);
728 binoptab
= add_optab
;
731 /* If we are inside an appropriately-short loop and one operand is an
732 expensive constant, force it into a register. */
733 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
734 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
735 op0
= force_reg (mode
, op0
);
737 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
738 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
739 op1
= force_reg (mode
, op1
);
741 /* Record where to delete back to if we backtrack. */
742 last
= get_last_insn ();
744 /* If operation is commutative,
745 try to make the first operand a register.
746 Even better, try to make it the same as the target.
747 Also try to make the last operand a constant. */
748 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
749 || binoptab
== smul_widen_optab
750 || binoptab
== umul_widen_optab
751 || binoptab
== smul_highpart_optab
752 || binoptab
== umul_highpart_optab
)
756 if (((target
== 0 || GET_CODE (target
) == REG
)
757 ? ((GET_CODE (op1
) == REG
758 && GET_CODE (op0
) != REG
)
760 : rtx_equal_p (op1
, target
))
761 || GET_CODE (op0
) == CONST_INT
)
769 /* If we can do it with a three-operand insn, do so. */
771 if (methods
!= OPTAB_MUST_WIDEN
772 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
774 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
775 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
776 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
778 rtx xop0
= op0
, xop1
= op1
;
783 temp
= gen_reg_rtx (mode
);
785 /* If it is a commutative operator and the modes would match
786 if we would swap the operands, we can save the conversions. */
789 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
790 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
794 tmp
= op0
; op0
= op1
; op1
= tmp
;
795 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
799 /* In case the insn wants input operands in modes different from
800 the result, convert the operands. It would seem that we
801 don't need to convert CONST_INTs, but we do, so that they're
802 a properly sign-extended for their modes; we choose the
803 widest mode between mode and mode[01], so that, in a widening
804 operation, we call convert_modes with different FROM and TO
805 modes, which ensures the value is sign-extended. Shift
806 operations are an exception, because the second operand needs
807 not be extended to the mode of the result. */
809 if (GET_MODE (op0
) != mode0
810 && mode0
!= VOIDmode
)
811 xop0
= convert_modes (mode0
,
812 GET_MODE (op0
) != VOIDmode
814 : GET_MODE_SIZE (mode
) > GET_MODE_SIZE (mode0
)
819 if (GET_MODE (xop1
) != mode1
820 && mode1
!= VOIDmode
)
821 xop1
= convert_modes (mode1
,
822 GET_MODE (op1
) != VOIDmode
824 : (GET_MODE_SIZE (mode
) > GET_MODE_SIZE (mode1
)
830 /* Now, if insn's predicates don't allow our operands, put them into
833 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
834 && mode0
!= VOIDmode
)
835 xop0
= copy_to_mode_reg (mode0
, xop0
);
837 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
838 && mode1
!= VOIDmode
)
839 xop1
= copy_to_mode_reg (mode1
, xop1
);
841 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
842 temp
= gen_reg_rtx (mode
);
844 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
847 /* If PAT is composed of more than one insn, try to add an appropriate
848 REG_EQUAL note to it. If we can't because TEMP conflicts with an
849 operand, call ourselves again, this time without a target. */
850 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
851 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
853 delete_insns_since (last
);
854 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
862 delete_insns_since (last
);
865 /* If this is a multiply, see if we can do a widening operation that
866 takes operands of this mode and makes a wider mode. */
868 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
869 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
870 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
871 != CODE_FOR_nothing
))
873 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
874 unsignedp
? umul_widen_optab
: smul_widen_optab
,
875 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
879 if (GET_MODE_CLASS (mode
) == MODE_INT
)
880 return gen_lowpart (mode
, temp
);
882 return convert_to_mode (mode
, temp
, unsignedp
);
886 /* Look for a wider mode of the same class for which we think we
887 can open-code the operation. Check for a widening multiply at the
888 wider mode as well. */
890 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
891 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
892 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
893 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
895 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
896 || (binoptab
== smul_optab
897 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
898 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
899 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
900 != CODE_FOR_nothing
)))
902 rtx xop0
= op0
, xop1
= op1
;
905 /* For certain integer operations, we need not actually extend
906 the narrow operands, as long as we will truncate
907 the results to the same narrowness. */
909 if ((binoptab
== ior_optab
|| binoptab
== and_optab
910 || binoptab
== xor_optab
911 || binoptab
== add_optab
|| binoptab
== sub_optab
912 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
913 && class == MODE_INT
)
916 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
918 /* The second operand of a shift must always be extended. */
919 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
920 no_extend
&& binoptab
!= ashl_optab
);
922 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
923 unsignedp
, OPTAB_DIRECT
);
926 if (class != MODE_INT
)
929 target
= gen_reg_rtx (mode
);
930 convert_move (target
, temp
, 0);
934 return gen_lowpart (mode
, temp
);
937 delete_insns_since (last
);
941 /* These can be done a word at a time. */
942 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
944 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
945 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
951 /* If TARGET is the same as one of the operands, the REG_EQUAL note
952 won't be accurate, so use a new target. */
953 if (target
== 0 || target
== op0
|| target
== op1
)
954 target
= gen_reg_rtx (mode
);
958 /* Do the actual arithmetic. */
959 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
961 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
962 rtx x
= expand_binop (word_mode
, binoptab
,
963 operand_subword_force (op0
, i
, mode
),
964 operand_subword_force (op1
, i
, mode
),
965 target_piece
, unsignedp
, next_methods
);
970 if (target_piece
!= x
)
971 emit_move_insn (target_piece
, x
);
974 insns
= get_insns ();
977 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
979 if (binoptab
->code
!= UNKNOWN
)
981 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
982 copy_rtx (op0
), copy_rtx (op1
));
986 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
991 /* Synthesize double word shifts from single word shifts. */
992 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
993 || binoptab
== ashr_optab
)
995 && GET_CODE (op1
) == CONST_INT
996 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
997 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
998 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
999 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1001 rtx insns
, inter
, equiv_value
;
1002 rtx into_target
, outof_target
;
1003 rtx into_input
, outof_input
;
1004 int shift_count
, left_shift
, outof_word
;
1006 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1007 won't be accurate, so use a new target. */
1008 if (target
== 0 || target
== op0
|| target
== op1
)
1009 target
= gen_reg_rtx (mode
);
1013 shift_count
= INTVAL (op1
);
1015 /* OUTOF_* is the word we are shifting bits away from, and
1016 INTO_* is the word that we are shifting bits towards, thus
1017 they differ depending on the direction of the shift and
1018 WORDS_BIG_ENDIAN. */
1020 left_shift
= binoptab
== ashl_optab
;
1021 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1023 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1024 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1026 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1027 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1029 if (shift_count
>= BITS_PER_WORD
)
1031 inter
= expand_binop (word_mode
, binoptab
,
1033 GEN_INT (shift_count
- BITS_PER_WORD
),
1034 into_target
, unsignedp
, next_methods
);
1036 if (inter
!= 0 && inter
!= into_target
)
1037 emit_move_insn (into_target
, inter
);
1039 /* For a signed right shift, we must fill the word we are shifting
1040 out of with copies of the sign bit. Otherwise it is zeroed. */
1041 if (inter
!= 0 && binoptab
!= ashr_optab
)
1042 inter
= CONST0_RTX (word_mode
);
1043 else if (inter
!= 0)
1044 inter
= expand_binop (word_mode
, binoptab
,
1046 GEN_INT (BITS_PER_WORD
- 1),
1047 outof_target
, unsignedp
, next_methods
);
1049 if (inter
!= 0 && inter
!= outof_target
)
1050 emit_move_insn (outof_target
, inter
);
1055 optab reverse_unsigned_shift
, unsigned_shift
;
1057 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1058 we must do a logical shift in the opposite direction of the
1061 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1063 /* For a shift of less than BITS_PER_WORD, to compute the word
1064 shifted towards, we need to unsigned shift the orig value of
1067 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1069 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1071 GEN_INT (BITS_PER_WORD
- shift_count
),
1072 0, unsignedp
, next_methods
);
1077 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1078 op1
, 0, unsignedp
, next_methods
);
1081 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1082 into_target
, unsignedp
, next_methods
);
1084 if (inter
!= 0 && inter
!= into_target
)
1085 emit_move_insn (into_target
, inter
);
1088 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1089 op1
, outof_target
, unsignedp
, next_methods
);
1091 if (inter
!= 0 && inter
!= outof_target
)
1092 emit_move_insn (outof_target
, inter
);
1095 insns
= get_insns ();
1100 if (binoptab
->code
!= UNKNOWN
)
1101 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1105 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1110 /* Synthesize double word rotates from single word shifts. */
1111 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1112 && class == MODE_INT
1113 && GET_CODE (op1
) == CONST_INT
1114 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1115 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1116 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1118 rtx insns
, equiv_value
;
1119 rtx into_target
, outof_target
;
1120 rtx into_input
, outof_input
;
1122 int shift_count
, left_shift
, outof_word
;
1124 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1125 won't be accurate, so use a new target. */
1126 if (target
== 0 || target
== op0
|| target
== op1
)
1127 target
= gen_reg_rtx (mode
);
1131 shift_count
= INTVAL (op1
);
1133 /* OUTOF_* is the word we are shifting bits away from, and
1134 INTO_* is the word that we are shifting bits towards, thus
1135 they differ depending on the direction of the shift and
1136 WORDS_BIG_ENDIAN. */
1138 left_shift
= (binoptab
== rotl_optab
);
1139 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1141 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1142 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1144 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1145 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1147 if (shift_count
== BITS_PER_WORD
)
1149 /* This is just a word swap. */
1150 emit_move_insn (outof_target
, into_input
);
1151 emit_move_insn (into_target
, outof_input
);
1156 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1157 rtx first_shift_count
, second_shift_count
;
1158 optab reverse_unsigned_shift
, unsigned_shift
;
1160 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1161 ? lshr_optab
: ashl_optab
);
1163 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1164 ? ashl_optab
: lshr_optab
);
1166 if (shift_count
> BITS_PER_WORD
)
1168 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1169 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
1173 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1174 second_shift_count
= GEN_INT (shift_count
);
1177 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1178 outof_input
, first_shift_count
,
1179 NULL_RTX
, unsignedp
, next_methods
);
1180 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1181 into_input
, second_shift_count
,
1182 into_target
, unsignedp
, next_methods
);
1184 if (into_temp1
!= 0 && into_temp2
!= 0)
1185 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1186 into_target
, unsignedp
, next_methods
);
1190 if (inter
!= 0 && inter
!= into_target
)
1191 emit_move_insn (into_target
, inter
);
1193 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1194 into_input
, first_shift_count
,
1195 NULL_RTX
, unsignedp
, next_methods
);
1196 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1197 outof_input
, second_shift_count
,
1198 outof_target
, unsignedp
, next_methods
);
1200 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1201 inter
= expand_binop (word_mode
, ior_optab
,
1202 outof_temp1
, outof_temp2
,
1203 outof_target
, unsignedp
, next_methods
);
1205 if (inter
!= 0 && inter
!= outof_target
)
1206 emit_move_insn (outof_target
, inter
);
1209 insns
= get_insns ();
1214 if (binoptab
->code
!= UNKNOWN
)
1215 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1219 /* We can't make this a no conflict block if this is a word swap,
1220 because the word swap case fails if the input and output values
1221 are in the same register. */
1222 if (shift_count
!= BITS_PER_WORD
)
1223 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1232 /* These can be done a word at a time by propagating carries. */
1233 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1234 && class == MODE_INT
1235 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1236 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1239 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1240 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1241 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1242 rtx xop0
, xop1
, xtarget
;
1244 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1245 value is one of those, use it. Otherwise, use 1 since it is the
1246 one easiest to get. */
1247 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1248 int normalizep
= STORE_FLAG_VALUE
;
1253 /* Prepare the operands. */
1254 xop0
= force_reg (mode
, op0
);
1255 xop1
= force_reg (mode
, op1
);
1257 xtarget
= gen_reg_rtx (mode
);
1259 if (target
== 0 || GET_CODE (target
) != REG
)
1262 /* Indicate for flow that the entire target reg is being set. */
1263 if (GET_CODE (target
) == REG
)
1264 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1266 /* Do the actual arithmetic. */
1267 for (i
= 0; i
< nwords
; i
++)
1269 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1270 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1271 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1272 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1275 /* Main add/subtract of the input operands. */
1276 x
= expand_binop (word_mode
, binoptab
,
1277 op0_piece
, op1_piece
,
1278 target_piece
, unsignedp
, next_methods
);
1284 /* Store carry from main add/subtract. */
1285 carry_out
= gen_reg_rtx (word_mode
);
1286 carry_out
= emit_store_flag_force (carry_out
,
1287 (binoptab
== add_optab
1290 word_mode
, 1, normalizep
);
1297 /* Add/subtract previous carry to main result. */
1298 newx
= expand_binop (word_mode
,
1299 normalizep
== 1 ? binoptab
: otheroptab
,
1301 NULL_RTX
, 1, next_methods
);
1305 /* Get out carry from adding/subtracting carry in. */
1306 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1307 carry_tmp
= emit_store_flag_force (carry_tmp
,
1308 (binoptab
== add_optab
1311 word_mode
, 1, normalizep
);
1313 /* Logical-ior the two poss. carry together. */
1314 carry_out
= expand_binop (word_mode
, ior_optab
,
1315 carry_out
, carry_tmp
,
1316 carry_out
, 0, next_methods
);
1320 emit_move_insn (target_piece
, newx
);
1323 carry_in
= carry_out
;
1326 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1328 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1330 rtx temp
= emit_move_insn (target
, xtarget
);
1332 set_unique_reg_note (temp
,
1334 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1343 delete_insns_since (last
);
1346 /* If we want to multiply two two-word values and have normal and widening
1347 multiplies of single-word values, we can do this with three smaller
1348 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1349 because we are not operating on one word at a time.
1351 The multiplication proceeds as follows:
1352 _______________________
1353 [__op0_high_|__op0_low__]
1354 _______________________
1355 * [__op1_high_|__op1_low__]
1356 _______________________________________________
1357 _______________________
1358 (1) [__op0_low__*__op1_low__]
1359 _______________________
1360 (2a) [__op0_low__*__op1_high_]
1361 _______________________
1362 (2b) [__op0_high_*__op1_low__]
1363 _______________________
1364 (3) [__op0_high_*__op1_high_]
1367 This gives a 4-word result. Since we are only interested in the
1368 lower 2 words, partial result (3) and the upper words of (2a) and
1369 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1370 calculated using non-widening multiplication.
1372 (1), however, needs to be calculated with an unsigned widening
1373 multiplication. If this operation is not directly supported we
1374 try using a signed widening multiplication and adjust the result.
1375 This adjustment works as follows:
1377 If both operands are positive then no adjustment is needed.
1379 If the operands have different signs, for example op0_low < 0 and
1380 op1_low >= 0, the instruction treats the most significant bit of
1381 op0_low as a sign bit instead of a bit with significance
1382 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1383 with 2**BITS_PER_WORD - op0_low, and two's complements the
1384 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1387 Similarly, if both operands are negative, we need to add
1388 (op0_low + op1_low) * 2**BITS_PER_WORD.
1390 We use a trick to adjust quickly. We logically shift op0_low right
1391 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1392 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1393 logical shift exists, we do an arithmetic right shift and subtract
1396 if (binoptab
== smul_optab
1397 && class == MODE_INT
1398 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1399 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1400 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1401 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1402 != CODE_FOR_nothing
)
1403 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1404 != CODE_FOR_nothing
)))
1406 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1407 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1408 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1409 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1410 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1411 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1413 rtx op0_xhigh
= NULL_RTX
;
1414 rtx op1_xhigh
= NULL_RTX
;
1416 /* If the target is the same as one of the inputs, don't use it. This
1417 prevents problems with the REG_EQUAL note. */
1418 if (target
== op0
|| target
== op1
1419 || (target
!= 0 && GET_CODE (target
) != REG
))
1422 /* Multiply the two lower words to get a double-word product.
1423 If unsigned widening multiplication is available, use that;
1424 otherwise use the signed form and compensate. */
1426 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1428 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1429 target
, 1, OPTAB_DIRECT
);
1431 /* If we didn't succeed, delete everything we did so far. */
1433 delete_insns_since (last
);
1435 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1439 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1440 != CODE_FOR_nothing
)
1442 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1443 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1444 target
, 1, OPTAB_DIRECT
);
1445 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1446 NULL_RTX
, 1, next_methods
);
1448 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1449 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1452 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1453 NULL_RTX
, 0, next_methods
);
1455 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1456 op0_xhigh
, op0_xhigh
, 0,
1460 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1461 NULL_RTX
, 1, next_methods
);
1463 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1464 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1467 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1468 NULL_RTX
, 0, next_methods
);
1470 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1471 op1_xhigh
, op1_xhigh
, 0,
1476 /* If we have been able to directly compute the product of the
1477 low-order words of the operands and perform any required adjustments
1478 of the operands, we proceed by trying two more multiplications
1479 and then computing the appropriate sum.
1481 We have checked above that the required addition is provided.
1482 Full-word addition will normally always succeed, especially if
1483 it is provided at all, so we don't worry about its failure. The
1484 multiplication may well fail, however, so we do handle that. */
1486 if (product
&& op0_xhigh
&& op1_xhigh
)
1488 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1489 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1490 NULL_RTX
, 0, OPTAB_DIRECT
);
1492 if (!REG_P (product_high
))
1493 product_high
= force_reg (word_mode
, product_high
);
1496 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1497 product_high
, 0, next_methods
);
1499 if (temp
!= 0 && temp
!= product_high
)
1500 emit_move_insn (product_high
, temp
);
1503 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1504 NULL_RTX
, 0, OPTAB_DIRECT
);
1507 temp
= expand_binop (word_mode
, add_optab
, temp
,
1508 product_high
, product_high
,
1511 if (temp
!= 0 && temp
!= product_high
)
1512 emit_move_insn (product_high
, temp
);
1514 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1518 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1520 temp
= emit_move_insn (product
, product
);
1521 set_unique_reg_note (temp
,
1523 gen_rtx_fmt_ee (MULT
, mode
,
1532 /* If we get here, we couldn't do it for some reason even though we
1533 originally thought we could. Delete anything we've emitted in
1536 delete_insns_since (last
);
1539 /* Open-code the vector operations if we have no hardware support
1541 if (class == MODE_VECTOR_INT
|| class == MODE_VECTOR_FLOAT
)
1542 return expand_vector_binop (mode
, binoptab
, op0
, op1
, target
,
1543 unsignedp
, methods
);
1545 /* We need to open-code the complex type operations: '+, -, * and /' */
1547 /* At this point we allow operations between two similar complex
1548 numbers, and also if one of the operands is not a complex number
1549 but rather of MODE_FLOAT or MODE_INT. However, the caller
1550 must make sure that the MODE of the non-complex operand matches
1551 the SUBMODE of the complex operand. */
1553 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1555 rtx real0
= 0, imag0
= 0;
1556 rtx real1
= 0, imag1
= 0;
1557 rtx realr
, imagr
, res
;
1562 /* Find the correct mode for the real and imaginary parts */
1563 enum machine_mode submode
1564 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1565 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1568 if (submode
== BLKmode
)
1572 target
= gen_reg_rtx (mode
);
1576 realr
= gen_realpart (submode
, target
);
1577 imagr
= gen_imagpart (submode
, target
);
1579 if (GET_MODE (op0
) == mode
)
1581 real0
= gen_realpart (submode
, op0
);
1582 imag0
= gen_imagpart (submode
, op0
);
1587 if (GET_MODE (op1
) == mode
)
1589 real1
= gen_realpart (submode
, op1
);
1590 imag1
= gen_imagpart (submode
, op1
);
1595 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0 || imag1
!= 0))
1598 switch (binoptab
->code
)
1601 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1603 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1604 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1605 realr
, unsignedp
, methods
);
1609 else if (res
!= realr
)
1610 emit_move_insn (realr
, res
);
1612 if (!complex_part_zero_p (imag0
, class, submode
)
1613 && !complex_part_zero_p (imag1
, class, submode
))
1614 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1615 imagr
, unsignedp
, methods
);
1616 else if (!complex_part_zero_p (imag0
, class, submode
))
1618 else if (binoptab
->code
== MINUS
)
1619 res
= expand_unop (submode
,
1620 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1621 imag1
, imagr
, unsignedp
);
1627 else if (res
!= imagr
)
1628 emit_move_insn (imagr
, res
);
1634 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1636 if (!complex_part_zero_p (imag0
, class, submode
)
1637 && !complex_part_zero_p (imag1
, class, submode
))
1641 /* Don't fetch these from memory more than once. */
1642 real0
= force_reg (submode
, real0
);
1643 real1
= force_reg (submode
, real1
);
1644 imag0
= force_reg (submode
, imag0
);
1645 imag1
= force_reg (submode
, imag1
);
1647 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1648 unsignedp
, methods
);
1650 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1651 unsignedp
, methods
);
1653 if (temp1
== 0 || temp2
== 0)
1658 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1659 temp1
, temp2
, realr
, unsignedp
, methods
));
1663 else if (res
!= realr
)
1664 emit_move_insn (realr
, res
);
1666 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1667 NULL_RTX
, unsignedp
, methods
);
1669 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1670 NULL_RTX
, unsignedp
, methods
);
1672 if (temp1
== 0 || temp2
== 0)
1677 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1678 temp1
, temp2
, imagr
, unsignedp
, methods
));
1682 else if (res
!= imagr
)
1683 emit_move_insn (imagr
, res
);
1689 /* Don't fetch these from memory more than once. */
1690 real0
= force_reg (submode
, real0
);
1691 real1
= force_reg (submode
, real1
);
1693 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1694 realr
, unsignedp
, methods
);
1697 else if (res
!= realr
)
1698 emit_move_insn (realr
, res
);
1700 if (!complex_part_zero_p (imag0
, class, submode
))
1701 res
= expand_binop (submode
, binoptab
,
1702 real1
, imag0
, imagr
, unsignedp
, methods
);
1704 res
= expand_binop (submode
, binoptab
,
1705 real0
, imag1
, imagr
, unsignedp
, methods
);
1709 else if (res
!= imagr
)
1710 emit_move_insn (imagr
, res
);
1717 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1719 if (complex_part_zero_p (imag1
, class, submode
))
1721 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1723 /* Don't fetch these from memory more than once. */
1724 real1
= force_reg (submode
, real1
);
1726 /* Simply divide the real and imaginary parts by `c' */
1727 if (class == MODE_COMPLEX_FLOAT
)
1728 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1729 realr
, unsignedp
, methods
);
1731 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1732 real0
, real1
, realr
, unsignedp
);
1736 else if (res
!= realr
)
1737 emit_move_insn (realr
, res
);
1739 if (class == MODE_COMPLEX_FLOAT
)
1740 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1741 imagr
, unsignedp
, methods
);
1743 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1744 imag0
, real1
, imagr
, unsignedp
);
1748 else if (res
!= imagr
)
1749 emit_move_insn (imagr
, res
);
1755 switch (flag_complex_divide_method
)
1758 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1759 realr
, imagr
, submode
,
1765 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1766 realr
, imagr
, submode
,
1786 if (binoptab
->code
!= UNKNOWN
)
1788 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1789 copy_rtx (op0
), copy_rtx (op1
));
1793 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1799 /* It can't be open-coded in this mode.
1800 Use a library call if one is available and caller says that's ok. */
1802 if (binoptab
->handlers
[(int) mode
].libfunc
1803 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1807 enum machine_mode op1_mode
= mode
;
1814 op1_mode
= word_mode
;
1815 /* Specify unsigned here,
1816 since negative shift counts are meaningless. */
1817 op1x
= convert_to_mode (word_mode
, op1
, 1);
1820 if (GET_MODE (op0
) != VOIDmode
1821 && GET_MODE (op0
) != mode
)
1822 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1824 /* Pass 1 for NO_QUEUE so we don't lose any increments
1825 if the libcall is cse'd or moved. */
1826 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1827 NULL_RTX
, LCT_CONST
, mode
, 2,
1828 op0
, mode
, op1x
, op1_mode
);
1830 insns
= get_insns ();
1833 target
= gen_reg_rtx (mode
);
1834 emit_libcall_block (insns
, target
, value
,
1835 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1840 delete_insns_since (last
);
1842 /* It can't be done in this mode. Can we do it in a wider mode? */
1844 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1845 || methods
== OPTAB_MUST_WIDEN
))
1847 /* Caller says, don't even try. */
1848 delete_insns_since (entry_last
);
1852 /* Compute the value of METHODS to pass to recursive calls.
1853 Don't allow widening to be tried recursively. */
1855 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1857 /* Look for a wider mode of the same class for which it appears we can do
1860 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1862 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1863 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1865 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1866 != CODE_FOR_nothing
)
1867 || (methods
== OPTAB_LIB
1868 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1870 rtx xop0
= op0
, xop1
= op1
;
1873 /* For certain integer operations, we need not actually extend
1874 the narrow operands, as long as we will truncate
1875 the results to the same narrowness. */
1877 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1878 || binoptab
== xor_optab
1879 || binoptab
== add_optab
|| binoptab
== sub_optab
1880 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1881 && class == MODE_INT
)
1884 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1885 unsignedp
, no_extend
);
1887 /* The second operand of a shift must always be extended. */
1888 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1889 no_extend
&& binoptab
!= ashl_optab
);
1891 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1892 unsignedp
, methods
);
1895 if (class != MODE_INT
)
1898 target
= gen_reg_rtx (mode
);
1899 convert_move (target
, temp
, 0);
1903 return gen_lowpart (mode
, temp
);
1906 delete_insns_since (last
);
1911 delete_insns_since (entry_last
);
1915 /* Like expand_binop, but for open-coding vectors binops. */
1918 expand_vector_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
1919 enum machine_mode mode
;
1924 enum optab_methods methods
;
1926 enum machine_mode submode
;
1928 rtx t
, a
, b
, res
, seq
;
1929 enum mode_class
class;
1931 class = GET_MODE_CLASS (mode
);
1933 submode
= GET_MODE_INNER (mode
);
1934 elts
= GET_MODE_NUNITS (mode
);
1937 target
= gen_reg_rtx (mode
);
1941 /* FIXME: Optimally, we should try to do this in narrower vector
1942 modes if available. E.g. When trying V8SI, try V4SI, else
1943 V2SI, else decay into SI. */
1945 switch (binoptab
->code
)
1951 for (i
= 0; i
< elts
; ++i
)
1953 t
= simplify_gen_subreg (submode
, target
, mode
,
1954 i
* UNITS_PER_WORD
);
1955 a
= simplify_gen_subreg (submode
, op0
, mode
,
1956 i
* UNITS_PER_WORD
);
1957 b
= simplify_gen_subreg (submode
, op1
, mode
,
1958 i
* UNITS_PER_WORD
);
1960 if (binoptab
->code
== DIV
)
1962 if (class == MODE_VECTOR_FLOAT
)
1963 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1964 unsignedp
, methods
);
1966 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1967 a
, b
, t
, unsignedp
);
1970 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1971 unsignedp
, methods
);
1976 emit_move_insn (t
, res
);
/* NOTE(review): lossy extraction — original lines 1996-2000, 2002-2004,
   2007-2008, 2010-2012, 2016, 2018, 2021, 2023 and the epilogue after
   2024 are missing here.  Consult the original optabs.c before editing.  */
1991 /* Like expand_unop but for open-coding vector unops. */
1994 expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
)
1995 enum machine_mode mode
;
2001 enum machine_mode submode
;
/* SUBMODE is the element mode, ELTS the element count of the vector.  */
2005 submode
= GET_MODE_INNER (mode
);
2006 elts
= GET_MODE_NUNITS (mode
);
2009 target
= gen_reg_rtx (mode
);
2013 /* FIXME: Optimally, we should try to do this in narrower vector
2014 modes if available. E.g. When trying V8SI, try V4SI, else
2015 V2SI, else decay into SI. */
/* Expand the scalar unop once per element, writing each result into
   the corresponding SUBMODE subreg of TARGET.  */
2017 for (i
= 0; i
< elts
; ++i
)
2019 t
= simplify_gen_subreg (submode
, target
, mode
, i
* UNITS_PER_WORD
);
2020 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* UNITS_PER_WORD
);
2022 res
= expand_unop (submode
, unoptab
, a
, t
, unsignedp
);
2024 emit_move_insn (t
, res
);
/* NOTE(review): lossy extraction — e.g. original lines 2046, 2048-2049,
   2052, 2057-2058, 2064, 2067, 2073-2074, 2078-2079 and 2083-2085 are
   missing, so the returns/guards between the attempts below are not
   visible.  Consult the original optabs.c before editing.  */
2034 /* Expand a binary operator which has both signed and unsigned forms.
2035 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2038 If we widen unsigned operands, we may use a signed wider operation instead
2039 of an unsigned wider operation, since the result would be the same. */
2042 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
2043 enum machine_mode mode
;
2044 optab uoptab
, soptab
;
2045 rtx op0
, op1
, target
;
2047 enum optab_methods methods
;
/* Pick the signedness-appropriate optab for the direct attempt.  */
2050 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2051 struct optab wide_soptab
;
2053 /* Do it without widening, if possible. */
2054 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2055 unsignedp
, OPTAB_DIRECT
);
2056 if (temp
|| methods
== OPTAB_DIRECT
)
2059 /* Try widening to a signed int. Make a fake signed optab that
2060 hides any signed insn for direct use. */
2061 wide_soptab
= *soptab
;
2062 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2063 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2065 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2066 unsignedp
, OPTAB_WIDEN
);
2068 /* For unsigned operands, try widening to an unsigned int. */
2069 if (temp
== 0 && unsignedp
)
2070 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2071 unsignedp
, OPTAB_WIDEN
);
2072 if (temp
|| methods
== OPTAB_WIDEN
)
2075 /* Use the right width lib call if that exists. */
2076 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2077 if (temp
|| methods
== OPTAB_LIB
)
2080 /* Must widen and use a lib call, use either signed or unsigned. */
2081 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2082 unsignedp
, methods
);
/* Final fallback: the unsigned optab with the caller's methods.  */
2086 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2087 unsignedp
, methods
);
/* NOTE(review): lossy extraction — original lines 2105-2109, 2114-2115,
   2117, 2120-2122, 2125-2126, 2136-2145 (partially), 2148, 2150, 2154,
   2156, 2176-2184, 2186-2189, 2202, 2204-2205, 2208-2215 and 2217-2219
   are missing, so declarations, guards, success returns and the emit of
   PAT are not visible here.  Consult the original optabs.c before
   editing.  */
2091 /* Generate code to perform an operation specified by BINOPTAB
2092 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2093 We assume that the order of the operands for the instruction
2094 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2095 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2097 Either TARG0 or TARG1 may be zero, but what that means is that
2098 the result is not actually wanted. We will generate it into
2099 a dummy pseudo-reg and discard it. They may not both be zero.
2101 Returns 1 if this operation can be performed; 0 if not. */
2104 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
/* MODE is taken from whichever target the caller supplied.  */
2110 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2111 enum mode_class
class;
2112 enum machine_mode wider_mode
;
2113 rtx entry_last
= get_last_insn ();
2116 class = GET_MODE_CLASS (mode
);
/* Flush any pending autoincrements on the operands.  */
2118 op0
= protect_from_queue (op0
, 0);
2119 op1
= protect_from_queue (op1
, 0);
2123 op0
= force_not_mem (op0
);
2124 op1
= force_not_mem (op1
);
2127 /* If we are inside an appropriately-short loop and one operand is an
2128 expensive constant, force it into a register. */
2129 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2130 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2131 op0
= force_reg (mode
, op0
);
2133 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2134 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2135 op1
= force_reg (mode
, op1
);
/* Make sure both targets exist; a zero target gets a dummy pseudo.  */
2138 targ0
= protect_from_queue (targ0
, 1);
2140 targ0
= gen_reg_rtx (mode
);
2142 targ1
= protect_from_queue (targ1
, 1);
2144 targ1
= gen_reg_rtx (mode
);
2146 /* Record where to go back to if we fail. */
2147 last
= get_last_insn ();
/* Case 1: the target machine has an insn for this optab in MODE.  */
2149 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2151 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2152 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2153 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2155 rtx xop0
= op0
, xop1
= op1
;
2157 /* In case this insn wants input operands in modes different from the
2158 result, convert the operands. */
2159 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
2160 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2162 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
2163 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
2165 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2166 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2167 xop0
= copy_to_mode_reg (mode0
, xop0
);
2169 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2170 xop1
= copy_to_mode_reg (mode1
, xop1
);
2172 /* We could handle this, but we should always be called with a pseudo
2173 for our targets and all insns should take them as outputs. */
2174 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2175 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
2178 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
/* On failure, discard everything emitted since LAST.  */
2185 delete_insns_since (last
);
2188 /* It can't be done in this mode. Can we do it in a wider mode? */
2190 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2192 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2193 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2195 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2196 != CODE_FOR_nothing
)
/* Recurse in WIDER_MODE, then narrow both results back down.  */
2198 rtx t0
= gen_reg_rtx (wider_mode
);
2199 rtx t1
= gen_reg_rtx (wider_mode
);
2200 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2201 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2203 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2206 convert_move (targ0
, t0
, unsignedp
);
2207 convert_move (targ1
, t1
, unsignedp
);
2211 delete_insns_since (last
);
/* Total failure: roll back to the state at function entry.  */
2216 delete_insns_since (entry_last
);
/* NOTE(review): lossy extraction — original lines 2226-2230 (remaining
   parameter declarations) and 2232-2234 are missing here; consult the
   original optabs.c before editing.  */
2220 /* Wrapper around expand_unop which takes an rtx code to specify
2221 the operation to perform, not an optab pointer. All other
2222 arguments are the same. */
2224 expand_simple_unop (mode
, code
, op0
, target
, unsignedp
)
2225 enum machine_mode mode
;
/* Map the rtx code to its optab via the global code_to_optab table,
   then delegate to expand_unop.  */
2231 optab unop
= code_to_optab
[(int) code
];
2235 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
/* NOTE(review): lossy extraction — many original lines are absent
   throughout this function (declarations, braces, guards, sequence
   start/end calls and several returns), as the embedded line numbering
   skips values.  Consult the original optabs.c before any edit; the
   comments added below describe only the visible structure.  */
2238 /* Generate code to perform an operation specified by UNOPTAB
2239 on operand OP0, with result having machine-mode MODE.
2241 UNSIGNEDP is for the case where we have to widen the operands
2242 to perform the operation. It says to use zero-extension.
2244 If TARGET is nonzero, the value
2245 is generated there, if it is convenient to do so.
2246 In all cases an rtx is returned for the locus of the value;
2247 this may or may not be TARGET. */
2250 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2251 enum machine_mode mode
;
2257 enum mode_class
class;
2258 enum machine_mode wider_mode
;
2260 rtx last
= get_last_insn ();
2263 class = GET_MODE_CLASS (mode
);
2265 op0
= protect_from_queue (op0
, 0);
2269 op0
= force_not_mem (op0
);
2273 target
= protect_from_queue (target
, 1);
/* Attempt 1: the machine has a direct insn for this unop in MODE.  */
2275 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2277 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2278 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2284 temp
= gen_reg_rtx (mode
);
2286 if (GET_MODE (xop0
) != VOIDmode
2287 && GET_MODE (xop0
) != mode0
)
2288 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2290 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2292 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2293 xop0
= copy_to_mode_reg (mode0
, xop0
);
2295 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2296 temp
= gen_reg_rtx (mode
);
2298 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If a REG_EQUAL note can't be attached to the multi-insn pattern,
   discard it and retry without a suggested target.  */
2301 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2302 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2304 delete_insns_since (last
);
2305 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2313 delete_insns_since (last
);
2316 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2318 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2319 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2320 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2322 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2326 /* For certain operations, we need not actually extend
2327 the narrow operand, as long as we will truncate the
2328 results to the same narrowness. */
2330 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2331 (unoptab
== neg_optab
2332 || unoptab
== one_cmpl_optab
)
2333 && class == MODE_INT
);
2335 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
/* Narrow the widened result back to MODE: a real conversion for
   non-integer classes, a lowpart subreg for integers.  */
2340 if (class != MODE_INT
)
2343 target
= gen_reg_rtx (mode
);
2344 convert_move (target
, temp
, 0);
2348 return gen_lowpart (mode
, temp
);
2351 delete_insns_since (last
);
2355 /* These can be done a word at a time. */
/* Attempt 2: multiword one's-complement, expanded word by word inside
   a no-conflict block.  */
2356 if (unoptab
== one_cmpl_optab
2357 && class == MODE_INT
2358 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2359 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2364 if (target
== 0 || target
== op0
)
2365 target
= gen_reg_rtx (mode
);
2369 /* Do the actual arithmetic. */
2370 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2372 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2373 rtx x
= expand_unop (word_mode
, unoptab
,
2374 operand_subword_force (op0
, i
, mode
),
2375 target_piece
, unsignedp
);
2377 if (target_piece
!= x
)
2378 emit_move_insn (target_piece
, x
);
2381 insns
= get_insns ();
2384 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2385 gen_rtx_fmt_e (unoptab
->code
, mode
,
2390 /* Open-code the complex negation operation. */
/* Attempt 3: complex NEG — negate real and imaginary parts separately.  */
2391 else if (unoptab
->code
== NEG
2392 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2398 /* Find the correct mode for the real and imaginary parts */
2399 enum machine_mode submode
2400 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2401 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2404 if (submode
== BLKmode
)
2408 target
= gen_reg_rtx (mode
);
2412 target_piece
= gen_imagpart (submode
, target
);
2413 x
= expand_unop (submode
, unoptab
,
2414 gen_imagpart (submode
, op0
),
2415 target_piece
, unsignedp
);
2416 if (target_piece
!= x
)
2417 emit_move_insn (target_piece
, x
);
2419 target_piece
= gen_realpart (submode
, target
);
2420 x
= expand_unop (submode
, unoptab
,
2421 gen_realpart (submode
, op0
),
2422 target_piece
, unsignedp
);
2423 if (target_piece
!= x
)
2424 emit_move_insn (target_piece
, x
);
2429 emit_no_conflict_block (seq
, target
, op0
, 0,
2430 gen_rtx_fmt_e (unoptab
->code
, mode
,
2435 /* Now try a library call in this mode. */
/* Attempt 4: a library function registered for this optab and MODE.  */
2436 if (unoptab
->handlers
[(int) mode
].libfunc
)
2443 /* Pass 1 for NO_QUEUE so we don't lose any increments
2444 if the libcall is cse'd or moved. */
2445 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2446 NULL_RTX
, LCT_CONST
, mode
, 1, op0
, mode
);
2447 insns
= get_insns ();
2450 target
= gen_reg_rtx (mode
);
2451 emit_libcall_block (insns
, target
, value
,
2452 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
/* Vector modes with no direct support are open-coded elementwise.  */
2457 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2458 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2460 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Attempt 5: widen again, this time also accepting wider-mode libfuncs.  */
2462 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2464 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2465 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2467 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2468 != CODE_FOR_nothing
)
2469 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2473 /* For certain operations, we need not actually extend
2474 the narrow operand, as long as we will truncate the
2475 results to the same narrowness. */
2477 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2478 (unoptab
== neg_optab
2479 || unoptab
== one_cmpl_optab
)
2480 && class == MODE_INT
);
2482 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2487 if (class != MODE_INT
)
2490 target
= gen_reg_rtx (mode
);
2491 convert_move (target
, temp
, 0);
2495 return gen_lowpart (mode
, temp
);
2498 delete_insns_since (last
);
2503 /* If there is no negate operation, try doing a subtract from zero.
2504 The US Software GOFAST library needs this. */
/* Last resort for NEG: expand as (0 - op0) with the matching subtract
   optab (trapping variant for negv_optab).  */
2505 if (unoptab
->code
== NEG
)
2508 temp
= expand_binop (mode
,
2509 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2510 CONST0_RTX (mode
), op0
,
2511 target
, unsignedp
, OPTAB_LIB_WIDEN
);
/* NOTE(review): this region is a garbled extraction of GCC's optabs.c --
   original line numbers are embedded in the text, statements are split
   across lines, and some lines (braces, declarations) are missing.
   The code text below is preserved byte-for-byte; only comments added.

   expand_abs: MODE is the operand mode, OP0 the operand, TARGET a
   suggested result location (may be 0).  RESULT_UNSIGNEDP selects the
   non-trapping abs_optab over the trapping absv_optab (see the ?:
   below).  SAFE presumably says TARGET may be overwritten before OP0
   is fully consumed -- TODO confirm against the missing lines.
   Strategies tried in order: abs insn, MAX(x,-x), shift/xor/sub trick,
   compare-and-negate.  */
2519 /* Emit code to compute the absolute value of OP0, with result to
2520 TARGET if convenient. (TARGET may be 0.) The return value says
2521 where the result actually is to be found.
2523 MODE is the mode of the operand; the mode of the result is
2524 different but can be deduced from MODE.
2529 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2530 enum machine_mode mode
;
2533 int result_unsignedp
;
2539 result_unsignedp
= 1;
2541 /* First try to do it with a special abs instruction. */
2542 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2547 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2548 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2550 rtx last
= get_last_insn ();
2552 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2554 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2560 delete_insns_since (last
);
2563 /* If this machine has expensive jumps, we can do integer absolute
2564 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2565 where W is the width of MODE. */
2567 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2569 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2570 size_int (GET_MODE_BITSIZE (mode
) - 1),
2573 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2576 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2577 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2583 /* If that does not win, use conditional jump and negate. */
2585 /* It is safe to use the target if it is the same
2586 as the source if this is also a pseudo register */
2587 if (op0
== target
&& GET_CODE (op0
) == REG
2588 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2591 op1
= gen_label_rtx ();
2592 if (target
== 0 || ! safe
2593 || GET_MODE (target
) != mode
2594 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2595 || (GET_CODE (target
) == REG
2596 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2597 target
= gen_reg_rtx (mode
);
2599 emit_move_insn (target
, op0
);
2602 /* If this mode is an integer too wide to compare properly,
2603 compare word by word. Rely on CSE to optimize constant cases. */
2604 if (GET_MODE_CLASS (mode
) == MODE_INT
2605 && ! can_compare_p (GE
, mode
, ccp_jump
))
2606 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2609 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2610 NULL_RTX
, NULL_RTX
, op1
);
2612 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2615 emit_move_insn (target
, op0
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines missing.  Code preserved
   byte-for-byte; comments only added.

   expand_complex_abs: computes |OP0| for a complex MODE.  SUBMODE is
   the component (real/imag) mode.  this_abs_optab is absv_optab when
   signed, -ftrapv, and integral (see lines 2666-2668), else abs_optab.
   Fallback chain: direct insn, wider-mode open-code, sqrt(re^2+im^2),
   library call, wider-mode retry.  */
2621 /* Emit code to compute the absolute value of OP0, with result to
2622 TARGET if convenient. (TARGET may be 0.) The return value says
2623 where the result actually is to be found.
2625 MODE is the mode of the operand; the mode of the result is
2626 different but can be deduced from MODE.
2628 UNSIGNEDP is relevant for complex integer modes. */
2631 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2632 enum machine_mode mode
;
2637 enum mode_class
class = GET_MODE_CLASS (mode
);
2638 enum machine_mode wider_mode
;
2640 rtx entry_last
= get_last_insn ();
2643 optab this_abs_optab
;
2645 /* Find the correct mode for the real and imaginary parts. */
2646 enum machine_mode submode
2647 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2648 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2651 if (submode
== BLKmode
)
2654 op0
= protect_from_queue (op0
, 0);
2658 op0
= force_not_mem (op0
);
2661 last
= get_last_insn ();
2664 target
= protect_from_queue (target
, 1);
2666 this_abs_optab
= ! unsignedp
&& flag_trapv
2667 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2668 ? absv_optab
: abs_optab
;
2670 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2672 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2673 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2679 temp
= gen_reg_rtx (submode
);
2681 if (GET_MODE (xop0
) != VOIDmode
2682 && GET_MODE (xop0
) != mode0
)
2683 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2685 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2687 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2688 xop0
= copy_to_mode_reg (mode0
, xop0
);
2690 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2691 temp
= gen_reg_rtx (submode
);
2693 pat
= GEN_FCN (icode
) (temp
, xop0
);
2696 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2697 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2700 delete_insns_since (last
);
2701 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2710 delete_insns_since (last
);
2713 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2715 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2716 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2718 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2719 != CODE_FOR_nothing
)
2723 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2724 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2728 if (class != MODE_COMPLEX_INT
)
2731 target
= gen_reg_rtx (submode
);
2732 convert_move (target
, temp
, 0);
2736 return gen_lowpart (submode
, temp
);
2739 delete_insns_since (last
);
2743 /* Open-code the complex absolute-value operation
2744 if we can open-code sqrt. Otherwise it's not worth while. */
2745 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
2748 rtx real
, imag
, total
;
2750 real
= gen_realpart (submode
, op0
);
2751 imag
= gen_imagpart (submode
, op0
);
2753 /* Square both parts. */
2754 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2755 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2757 /* Sum the parts. */
2758 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2759 0, OPTAB_LIB_WIDEN
);
2761 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2762 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2764 delete_insns_since (last
);
2769 /* Now try a library call in this mode. */
2770 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
/* NOTE(review): the guard above tests this_abs_optab's libfunc, but the
   call below fetches abs_optab's libfunc.  When this_abs_optab is
   absv_optab these differ -- likely should be this_abs_optab; confirm
   against upstream optabs.c history before changing.  */
2777 /* Pass 1 for NO_QUEUE so we don't lose any increments
2778 if the libcall is cse'd or moved. */
2779 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2780 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
2781 insns
= get_insns ();
2784 target
= gen_reg_rtx (submode
);
2785 emit_libcall_block (insns
, target
, value
,
2786 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
2791 /* It can't be done in this mode. Can we do it in a wider mode? */
2793 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2794 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2796 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2797 != CODE_FOR_nothing
)
2798 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2802 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2804 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2808 if (class != MODE_COMPLEX_INT
)
2811 target
= gen_reg_rtx (submode
);
2812 convert_move (target
, temp
, 0);
2816 return gen_lowpart (submode
, temp
);
2819 delete_insns_since (last
);
2823 delete_insns_since (entry_last
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines missing.  Code preserved
   byte-for-byte; comments only added.

   emit_unop_insn: emits the pattern ICODE computing (CODE OP0) into
   TARGET, routing operands through pseudos when the insn's predicates
   reject them, and adding a REG_EQUAL note when CODE != UNKNOWN.  */
2827 /* Generate an instruction whose insn-code is INSN_CODE,
2828 with two operands: an output TARGET and an input OP0.
2829 TARGET *must* be nonzero, and the output is always stored there.
2830 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2831 the value that is stored into TARGET. */
2834 emit_unop_insn (icode
, target
, op0
, code
)
2841 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2844 temp
= target
= protect_from_queue (target
, 1);
2846 op0
= protect_from_queue (op0
, 0);
2848 /* Sign and zero extension from memory is often done specially on
2849 RISC machines, so forcing into a register here can pessimize
2851 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2852 op0
= force_not_mem (op0
);
2854 /* Now, if insn does not accept our operands, put them into pseudos. */
2856 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2857 op0
= copy_to_mode_reg (mode0
, op0
);
2859 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2860 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2861 temp
= gen_reg_rtx (GET_MODE (temp
));
2863 pat
= GEN_FCN (icode
) (temp
, op0
);
2865 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2866 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2871 emit_move_insn (target
, temp
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines missing.  Code preserved
   byte-for-byte; comments only added.

   emit_no_conflict_block: wraps INSNS (a multi-word operation) in a
   REG_LIBCALL/REG_RETVAL region with per-insn REG_NO_CONFLICT notes.
   Bails out to plain emit_insn when TARGET isn't a REG, during reload,
   or when INSNS contains non-INSNs / nested libcall regions.  */
2874 /* Emit code to perform a series of operations on a multi-word quantity, one
2877 Such a block is preceded by a CLOBBER of the output, consists of multiple
2878 insns, each setting one word of the output, and followed by a SET copying
2879 the output to itself.
2881 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2882 note indicating that it doesn't conflict with the (also multi-word)
2883 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2886 INSNS is a block of code generated to perform the operation, not including
2887 the CLOBBER and final copy. All insns that compute intermediate values
2888 are first emitted, followed by the block as described above.
2890 TARGET, OP0, and OP1 are the output and inputs of the operations,
2891 respectively. OP1 may be zero for a unary operation.
2893 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2896 If TARGET is not a register, INSNS is simply emitted with no special
2897 processing. Likewise if anything in INSNS is not an INSN or if
2898 there is a libcall block inside INSNS.
2900 The final insn emitted is returned. */
2903 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2909 rtx prev
, next
, first
, last
, insn
;
2911 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2912 return emit_insn (insns
);
2914 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2915 if (GET_CODE (insn
) != INSN
2916 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2917 return emit_insn (insns
);
2919 /* First emit all insns that do not store into words of the output and remove
2920 these from the list. */
2921 for (insn
= insns
; insn
; insn
= next
)
2926 next
= NEXT_INSN (insn
);
2928 /* Some ports (cris) create an libcall regions at their own. We must
2929 avoid any potential nesting of LIBCALLs. */
2930 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2931 remove_note (insn
, note
);
2932 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2933 remove_note (insn
, note
);
2935 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2936 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2937 set
= PATTERN (insn
);
2938 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2940 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2941 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2943 set
= XVECEXP (PATTERN (insn
), 0, i
);
2951 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2953 if (PREV_INSN (insn
))
2954 NEXT_INSN (PREV_INSN (insn
)) = next
;
2959 PREV_INSN (next
) = PREV_INSN (insn
);
2965 prev
= get_last_insn ();
2967 /* Now write the CLOBBER of the output, followed by the setting of each
2968 of the words, followed by the final copy. */
2969 if (target
!= op0
&& target
!= op1
)
2970 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2972 for (insn
= insns
; insn
; insn
= next
)
2974 next
= NEXT_INSN (insn
);
2977 if (op1
&& GET_CODE (op1
) == REG
)
2978 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
2981 if (op0
&& GET_CODE (op0
) == REG
)
2982 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
2986 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2987 != CODE_FOR_nothing
)
2989 last
= emit_move_insn (target
, target
);
2991 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
2995 last
= get_last_insn ();
2997 /* Remove any existing REG_EQUAL note from "last", or else it will
2998 be mistaken for a note referring to the full contents of the
2999 alleged libcall value when found together with the REG_RETVAL
3000 note added below. An existing note can come from an insn
3001 expansion at "last". */
3002 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3006 first
= get_insns ();
3008 first
= NEXT_INSN (prev
);
3010 /* Encapsulate the block so it gets manipulated as a unit. */
3011 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3013 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines missing.  Code preserved
   byte-for-byte; comments only added (and one comment typo fixed:
   "REQ_EQUAL" -> "REG_EQUAL").

   emit_libcall_block: hoists constant-loading pseudo-sets out of INSNS,
   then emits the rest as a REG_LIBCALL/REG_RETVAL region ending in a
   TARGET = RESULT copy carrying a REG_EQUAL note of EQUIV.  Also
   manages REG_EH_REGION notes for -fnon-call-exceptions.  */
3018 /* Emit code to make a call to a constant function or a library call.
3020 INSNS is a list containing all insns emitted in the call.
3021 These insns leave the result in RESULT. Our block is to copy RESULT
3022 to TARGET, which is logically equivalent to EQUIV.
3024 We first emit any insns that set a pseudo on the assumption that these are
3025 loading constants into registers; doing so allows them to be safely cse'ed
3026 between blocks. Then we emit all the other insns in the block, followed by
3027 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3028 note with an operand of EQUIV.
3030 Moving assignments to pseudos outside of the block is done to improve
3031 the generated code, but is not required to generate correct code,
3032 hence being unable to move an assignment is not grounds for not making
3033 a libcall block. There are two reasons why it is safe to leave these
3034 insns inside the block: First, we know that these pseudos cannot be
3035 used in generated RTL outside the block since they are created for
3036 temporary purposes within the block. Second, CSE will not record the
3037 values of anything set inside a libcall block, so we know they must
3038 be dead at the end of the block.
3040 Except for the first group of insns (the ones setting pseudos), the
3041 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3044 emit_libcall_block (insns
, target
, result
, equiv
)
3050 rtx final_dest
= target
;
3051 rtx prev
, next
, first
, last
, insn
;
3053 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3054 into a MEM later. Protect the libcall block from this change. */
3055 if (! REG_P (target
) || REG_USERVAR_P (target
))
3056 target
= gen_reg_rtx (GET_MODE (target
));
3058 /* If we're using non-call exceptions, a libcall corresponding to an
3059 operation that may trap may also trap. */
3060 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3062 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3063 if (GET_CODE (insn
) == CALL_INSN
)
3065 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3067 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3068 remove_note (insn
, note
);
3072 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3073 reg note to indicate that this call cannot throw or execute a nonlocal
3074 goto (unless there is already a REG_EH_REGION note, in which case
3076 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3077 if (GET_CODE (insn
) == CALL_INSN
)
3079 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3082 XEXP (note
, 0) = GEN_INT (-1);
3084 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
3088 /* First emit all insns that set pseudos. Remove them from the list as
3089 we go. Avoid insns that set pseudos which were referenced in previous
3090 insns. These can be generated by move_by_pieces, for example,
3091 to update an address. Similarly, avoid insns that reference things
3092 set in previous insns. */
3094 for (insn
= insns
; insn
; insn
= next
)
3096 rtx set
= single_set (insn
);
3099 /* Some ports (cris) create an libcall regions at their own. We must
3100 avoid any potential nesting of LIBCALLs. */
3101 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3102 remove_note (insn
, note
);
3103 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3104 remove_note (insn
, note
);
3106 next
= NEXT_INSN (insn
);
3108 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3109 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3111 || ((! INSN_P(insns
)
3112 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3113 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3114 && ! modified_in_p (SET_SRC (set
), insns
)
3115 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3117 if (PREV_INSN (insn
))
3118 NEXT_INSN (PREV_INSN (insn
)) = next
;
3123 PREV_INSN (next
) = PREV_INSN (insn
);
3129 prev
= get_last_insn ();
3131 /* Write the remaining insns followed by the final copy. */
3133 for (insn
= insns
; insn
; insn
= next
)
3135 next
= NEXT_INSN (insn
);
3140 last
= emit_move_insn (target
, result
);
3141 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3142 != CODE_FOR_nothing
)
3143 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3146 /* Remove any existing REG_EQUAL note from "last", or else it will
3147 be mistaken for a note referring to the full contents of the
3148 libcall value when found together with the REG_RETVAL note added
3149 below. An existing note can come from an insn expansion at
3151 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3154 if (final_dest
!= target
)
3155 emit_move_insn (final_dest
, target
);
3158 first
= get_insns ();
3160 first
= NEXT_INSN (prev
);
3162 /* Encapsulate the block so it gets manipulated as a unit. */
3163 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3165 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3167 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
/* NOTE(review): the function header line is missing from this garbled
   extraction; only the body remains.  Presumably this is emit_clr_insn --
   TODO confirm against the original file.  Code preserved byte-for-byte.  */
3172 /* Generate code to store zero in X. */
3178 emit_move_insn (x
, const0_rtx
);
/* NOTE(review): garbled extraction; code preserved byte-for-byte.
   emit_0_to_1_insn: simply moves const1_rtx into X.  */
3181 /* Generate code to store 1 in X
3182 assuming it contains zero beforehand. */
3185 emit_0_to_1_insn (x
)
3188 emit_move_insn (x
, const1_rtx
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines (the loop brace and returns)
   missing.  Code preserved byte-for-byte; comments only added.

   can_compare_p: walks MODE through successively wider modes checking
   cmp_optab plus the purpose-specific tables (bcc_gen_fctn for jumps,
   setcc_gen_code for store-flag, cbranch/cmov/cstore optabs).  */
3191 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3192 PURPOSE describes how this comparison will be used. CODE is the rtx
3193 comparison code we will be using.
3195 ??? Actually, CODE is slightly weaker than that. A target is still
3196 required to implement all of the normal bcc operations, but not
3197 required to implement all (or any) of the unordered bcc operations. */
3200 can_compare_p (code
, mode
, purpose
)
3202 enum machine_mode mode
;
3203 enum can_compare_purpose purpose
;
3207 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3209 if (purpose
== ccp_jump
)
3210 return bcc_gen_fctn
[(int)code
] != NULL
;
3211 else if (purpose
== ccp_store_flag
)
3212 return setcc_gen_code
[(int)code
] != CODE_FOR_nothing
;
3214 /* There's only one cmov entry point, and it's allowed to fail. */
3217 if (purpose
== ccp_jump
3218 && cbranch_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3220 if (purpose
== ccp_cmov
3221 && cmov_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3223 if (purpose
== ccp_store_flag
3224 && cstore_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3227 mode
= GET_MODE_WIDER_MODE (mode
);
3229 while (mode
!= VOIDmode
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines missing (including #else /
   #endif lines that must pair with the #ifdefs below).  Code preserved
   byte-for-byte; comments only added.

   prepare_cmp_insn: normalizes *PX / *PY / *PMODE for a comparison.
   BLKmode operands go through cmpstr[qhs]i patterns or a memcmp/bcmp
   library call; sub-word-comparable modes fall back to a cmp/ucmp
   libfunc; MODE_FLOAT without insns defers to prepare_float_lib_cmp.  */
3234 /* This function is called when we are going to emit a compare instruction that
3235 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3237 *PMODE is the mode of the inputs (in case they are const_int).
3238 *PUNSIGNEDP nonzero says that the operands are unsigned;
3239 this matters if they need to be widened.
3241 If they have mode BLKmode, then SIZE specifies the size of both operands.
3243 This function performs all the setup necessary so that the caller only has
3244 to emit a single comparison insn. This setup can involve doing a BLKmode
3245 comparison or emitting a library call to perform the comparison if no insn
3246 is available to handle it.
3247 The values which are passed in through pointers can be modified; the caller
3248 should perform the comparison on the modified values. */
3251 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3253 enum rtx_code
*pcomparison
;
3255 enum machine_mode
*pmode
;
3257 enum can_compare_purpose purpose
;
3259 enum machine_mode mode
= *pmode
;
3260 rtx x
= *px
, y
= *py
;
3261 int unsignedp
= *punsignedp
;
3262 enum mode_class
class;
3264 class = GET_MODE_CLASS (mode
);
3266 /* They could both be VOIDmode if both args are immediate constants,
3267 but we should fold that at an earlier stage.
3268 With no special code here, this will call abort,
3269 reminding the programmer to implement such folding. */
3271 if (mode
!= BLKmode
&& flag_force_mem
)
3273 x
= force_not_mem (x
);
3274 y
= force_not_mem (y
);
3277 /* If we are inside an appropriately-short loop and one operand is an
3278 expensive constant, force it into a register. */
3279 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3280 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3281 x
= force_reg (mode
, x
);
3283 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3284 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3285 y
= force_reg (mode
, y
);
3288 /* Abort if we have a non-canonical comparison. The RTL documentation
3289 states that canonical comparisons are required only for targets which
3291 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3295 /* Don't let both operands fail to indicate the mode. */
3296 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3297 x
= force_reg (mode
, x
);
3299 /* Handle all BLKmode compares. */
3301 if (mode
== BLKmode
)
3304 enum machine_mode result_mode
;
3305 rtx opalign ATTRIBUTE_UNUSED
3306 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3309 x
= protect_from_queue (x
, 0);
3310 y
= protect_from_queue (y
, 0);
3314 #ifdef HAVE_cmpstrqi
3316 && GET_CODE (size
) == CONST_INT
3317 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3319 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3320 result
= gen_reg_rtx (result_mode
);
3321 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3325 #ifdef HAVE_cmpstrhi
3327 && GET_CODE (size
) == CONST_INT
3328 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3330 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3331 result
= gen_reg_rtx (result_mode
);
3332 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3336 #ifdef HAVE_cmpstrsi
3339 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3340 result
= gen_reg_rtx (result_mode
);
3341 size
= protect_from_queue (size
, 0);
3342 emit_insn (gen_cmpstrsi (result
, x
, y
,
3343 convert_to_mode (SImode
, size
, 1),
3349 #ifdef TARGET_MEM_FUNCTIONS
3350 emit_library_call (memcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3351 TYPE_MODE (integer_type_node
), 3,
3352 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3353 convert_to_mode (TYPE_MODE (sizetype
), size
,
3354 TREE_UNSIGNED (sizetype
)),
3355 TYPE_MODE (sizetype
));
3357 emit_library_call (bcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3358 TYPE_MODE (integer_type_node
), 3,
3359 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3360 convert_to_mode (TYPE_MODE (integer_type_node
),
3362 TREE_UNSIGNED (integer_type_node
)),
3363 TYPE_MODE (integer_type_node
));
3366 /* Immediately move the result of the libcall into a pseudo
3367 register so reload doesn't clobber the value if it needs
3368 the return register for a spill reg. */
3369 result
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
3370 result_mode
= TYPE_MODE (integer_type_node
);
3371 emit_move_insn (result
,
3372 hard_libcall_value (result_mode
));
3376 *pmode
= result_mode
;
3382 if (can_compare_p (*pcomparison
, mode
, purpose
))
3385 /* Handle a lib call just for the mode we are using. */
3387 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3389 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3392 /* If we want unsigned, and this mode has a distinct unsigned
3393 comparison routine, use that. */
3394 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3395 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3397 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
,
3400 /* Immediately move the result of the libcall into a pseudo
3401 register so reload doesn't clobber the value if it needs
3402 the return register for a spill reg. */
3403 result
= gen_reg_rtx (word_mode
);
3404 emit_move_insn (result
, hard_libcall_value (word_mode
));
3406 /* Integer comparison returns a result that must be compared against 1,
3407 so that even if we do an unsigned compare afterward,
3408 there is still a value that can represent the result "less than". */
3415 if (class == MODE_FLOAT
)
3416 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines.  Code preserved byte-for-byte;
   comments only added.

   prepare_operand: widens X from MODE to WIDER_MODE if needed, then
   copies it into a register of the insn's declared operand mode when
   the operand predicate rejects it.  */
3422 /* Before emitting an insn with code ICODE, make sure that X, which is going
3423 to be used for operand OPNUM of the insn, is converted from mode MODE to
3424 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3425 that it is accepted by the operand predicate. Return the new value. */
3428 prepare_operand (icode
, x
, opnum
, mode
, wider_mode
, unsignedp
)
3432 enum machine_mode mode
, wider_mode
;
3435 x
= protect_from_queue (x
, 0);
3437 if (mode
!= wider_mode
)
3438 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3440 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3441 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3442 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
/* NOTE(review): garbled extraction -- original line numbers embedded,
   statements split across lines, some lines (loop header, returns,
   braces) missing.  Code preserved byte-for-byte; comments only added.

   emit_cmp_and_jump_insn_1: for MODE and successively wider modes,
   tries in order: a combined compare-and-branch (cbranch_optab), a
   test-against-zero (tst_optab) plus bcc, then a plain compare
   (cmp_optab) plus bcc.  */
3446 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3447 we can do the comparison.
3448 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3449 be NULL_RTX which indicates that only a comparison is to be generated. */
3452 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3454 enum machine_mode mode
;
3455 enum rtx_code comparison
;
3459 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3460 enum mode_class
class = GET_MODE_CLASS (mode
);
3461 enum machine_mode wider_mode
= mode
;
3463 /* Try combined insns first. */
3466 enum insn_code icode
;
3467 PUT_MODE (test
, wider_mode
);
3471 icode
= cbranch_optab
->handlers
[(int)wider_mode
].insn_code
;
3473 if (icode
!= CODE_FOR_nothing
3474 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3476 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3477 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3478 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3483 /* Handle some compares against zero. */
3484 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3485 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3487 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3488 emit_insn (GEN_FCN (icode
) (x
));
3490 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3494 /* Handle compares for which there is a directly suitable insn. */
3496 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3497 if (icode
!= CODE_FOR_nothing
)
3499 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3500 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3501 emit_insn (GEN_FCN (icode
) (x
, y
));
3503 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3507 if (class != MODE_INT
&& class != MODE_FLOAT
3508 && class != MODE_COMPLEX_FLOAT
)
3511 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3512 } while (wider_mode
!= VOIDmode
);
3517 /* Generate code to compare X with Y so that the condition codes are
3518 set and to jump to LABEL if the condition is true. If X is a
3519 constant and Y is not a constant, then the comparison is swapped to
3520 ensure that the comparison RTL has the canonical form.
3522 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3523 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3524 the proper branch condition code.
3526 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3528 MODE is the mode of the inputs (in case they are const_int).
3530 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3531 be passed unchanged to emit_cmp_insn, then potentially converted into an
3532 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3535 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3537 enum rtx_code comparison
;
3539 enum machine_mode mode
;
3543 rtx op0
= x
, op1
= y
;
3545 /* Swap operands and condition to ensure canonical RTL. */
3546 if (swap_commutative_operands_p (x
, y
))
3548 /* If we're not emitting a branch, this means some caller
3554 comparison
= swap_condition (comparison
);
3558 /* If OP0 is still a constant, then both X and Y must be constants. Force
3559 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3561 if (CONSTANT_P (op0
))
3562 op0
= force_reg (mode
, op0
);
3567 comparison
= unsigned_condition (comparison
);
3569 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3571 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3574 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3577 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3579 enum rtx_code comparison
;
3581 enum machine_mode mode
;
3584 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3587 /* Emit a library call comparison between floating point X and Y.
3588 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3591 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3593 enum rtx_code
*pcomparison
;
3594 enum machine_mode
*pmode
;
3597 enum rtx_code comparison
= *pcomparison
;
3599 rtx x
= *px
= protect_from_queue (*px
, 0);
3600 rtx y
= *py
= protect_from_queue (*py
, 0);
3601 enum machine_mode mode
= GET_MODE (x
);
3609 libfunc
= eqhf2_libfunc
;
3613 libfunc
= nehf2_libfunc
;
3617 libfunc
= gthf2_libfunc
;
3618 if (libfunc
== NULL_RTX
)
3620 tmp
= x
; x
= y
; y
= tmp
;
3622 libfunc
= lthf2_libfunc
;
3627 libfunc
= gehf2_libfunc
;
3628 if (libfunc
== NULL_RTX
)
3630 tmp
= x
; x
= y
; y
= tmp
;
3632 libfunc
= lehf2_libfunc
;
3637 libfunc
= lthf2_libfunc
;
3638 if (libfunc
== NULL_RTX
)
3640 tmp
= x
; x
= y
; y
= tmp
;
3642 libfunc
= gthf2_libfunc
;
3647 libfunc
= lehf2_libfunc
;
3648 if (libfunc
== NULL_RTX
)
3650 tmp
= x
; x
= y
; y
= tmp
;
3652 libfunc
= gehf2_libfunc
;
3657 libfunc
= unordhf2_libfunc
;
3663 else if (mode
== SFmode
)
3667 libfunc
= eqsf2_libfunc
;
3671 libfunc
= nesf2_libfunc
;
3675 libfunc
= gtsf2_libfunc
;
3676 if (libfunc
== NULL_RTX
)
3678 tmp
= x
; x
= y
; y
= tmp
;
3680 libfunc
= ltsf2_libfunc
;
3685 libfunc
= gesf2_libfunc
;
3686 if (libfunc
== NULL_RTX
)
3688 tmp
= x
; x
= y
; y
= tmp
;
3690 libfunc
= lesf2_libfunc
;
3695 libfunc
= ltsf2_libfunc
;
3696 if (libfunc
== NULL_RTX
)
3698 tmp
= x
; x
= y
; y
= tmp
;
3700 libfunc
= gtsf2_libfunc
;
3705 libfunc
= lesf2_libfunc
;
3706 if (libfunc
== NULL_RTX
)
3708 tmp
= x
; x
= y
; y
= tmp
;
3710 libfunc
= gesf2_libfunc
;
3715 libfunc
= unordsf2_libfunc
;
3721 else if (mode
== DFmode
)
3725 libfunc
= eqdf2_libfunc
;
3729 libfunc
= nedf2_libfunc
;
3733 libfunc
= gtdf2_libfunc
;
3734 if (libfunc
== NULL_RTX
)
3736 tmp
= x
; x
= y
; y
= tmp
;
3738 libfunc
= ltdf2_libfunc
;
3743 libfunc
= gedf2_libfunc
;
3744 if (libfunc
== NULL_RTX
)
3746 tmp
= x
; x
= y
; y
= tmp
;
3748 libfunc
= ledf2_libfunc
;
3753 libfunc
= ltdf2_libfunc
;
3754 if (libfunc
== NULL_RTX
)
3756 tmp
= x
; x
= y
; y
= tmp
;
3758 libfunc
= gtdf2_libfunc
;
3763 libfunc
= ledf2_libfunc
;
3764 if (libfunc
== NULL_RTX
)
3766 tmp
= x
; x
= y
; y
= tmp
;
3768 libfunc
= gedf2_libfunc
;
3773 libfunc
= unorddf2_libfunc
;
3779 else if (mode
== XFmode
)
3783 libfunc
= eqxf2_libfunc
;
3787 libfunc
= nexf2_libfunc
;
3791 libfunc
= gtxf2_libfunc
;
3792 if (libfunc
== NULL_RTX
)
3794 tmp
= x
; x
= y
; y
= tmp
;
3796 libfunc
= ltxf2_libfunc
;
3801 libfunc
= gexf2_libfunc
;
3802 if (libfunc
== NULL_RTX
)
3804 tmp
= x
; x
= y
; y
= tmp
;
3806 libfunc
= lexf2_libfunc
;
3811 libfunc
= ltxf2_libfunc
;
3812 if (libfunc
== NULL_RTX
)
3814 tmp
= x
; x
= y
; y
= tmp
;
3816 libfunc
= gtxf2_libfunc
;
3821 libfunc
= lexf2_libfunc
;
3822 if (libfunc
== NULL_RTX
)
3824 tmp
= x
; x
= y
; y
= tmp
;
3826 libfunc
= gexf2_libfunc
;
3831 libfunc
= unordxf2_libfunc
;
3837 else if (mode
== TFmode
)
3841 libfunc
= eqtf2_libfunc
;
3845 libfunc
= netf2_libfunc
;
3849 libfunc
= gttf2_libfunc
;
3850 if (libfunc
== NULL_RTX
)
3852 tmp
= x
; x
= y
; y
= tmp
;
3854 libfunc
= lttf2_libfunc
;
3859 libfunc
= getf2_libfunc
;
3860 if (libfunc
== NULL_RTX
)
3862 tmp
= x
; x
= y
; y
= tmp
;
3864 libfunc
= letf2_libfunc
;
3869 libfunc
= lttf2_libfunc
;
3870 if (libfunc
== NULL_RTX
)
3872 tmp
= x
; x
= y
; y
= tmp
;
3874 libfunc
= gttf2_libfunc
;
3879 libfunc
= letf2_libfunc
;
3880 if (libfunc
== NULL_RTX
)
3882 tmp
= x
; x
= y
; y
= tmp
;
3884 libfunc
= getf2_libfunc
;
3889 libfunc
= unordtf2_libfunc
;
3897 enum machine_mode wider_mode
;
3899 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3900 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3902 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3903 != CODE_FOR_nothing
)
3904 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3906 x
= protect_from_queue (x
, 0);
3907 y
= protect_from_queue (y
, 0);
3908 *px
= convert_to_mode (wider_mode
, x
, 0);
3909 *py
= convert_to_mode (wider_mode
, y
, 0);
3910 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3920 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
, y
,
3923 /* Immediately move the result of the libcall into a pseudo
3924 register so reload doesn't clobber the value if it needs
3925 the return register for a spill reg. */
3926 result
= gen_reg_rtx (word_mode
);
3927 emit_move_insn (result
, hard_libcall_value (word_mode
));
3931 if (comparison
== UNORDERED
)
3933 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
3934 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3940 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3943 emit_indirect_jump (loc
)
3946 if (! ((*insn_data
[(int)CODE_FOR_indirect_jump
].operand
[0].predicate
)
3948 loc
= copy_to_mode_reg (Pmode
, loc
);
3950 emit_jump_insn (gen_indirect_jump (loc
));
3954 #ifdef HAVE_conditional_move
3956 /* Emit a conditional move instruction if the machine supports one for that
3957 condition and machine mode.
3959 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3960 the mode to use should they be constants. If it is VOIDmode, they cannot
3963 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3964 should be stored there. MODE is the mode to use should they be constants.
3965 If it is VOIDmode, they cannot both be constants.
3967 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3968 is not supported. */
3971 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3976 enum machine_mode cmode
;
3978 enum machine_mode mode
;
3981 rtx tem
, subtarget
, comparison
, insn
;
3982 enum insn_code icode
;
3983 enum rtx_code reversed
;
3985 /* If one operand is constant, make it the second one. Only do this
3986 if the other operand is not constant as well. */
3988 if (swap_commutative_operands_p (op0
, op1
))
3993 code
= swap_condition (code
);
3996 /* get_condition will prefer to generate LT and GT even if the old
3997 comparison was against zero, so undo that canonicalization here since
3998 comparisons against zero are cheaper. */
3999 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4000 code
= LE
, op1
= const0_rtx
;
4001 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4002 code
= GE
, op1
= const0_rtx
;
4004 if (cmode
== VOIDmode
)
4005 cmode
= GET_MODE (op0
);
4007 if (swap_commutative_operands_p (op2
, op3
)
4008 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4017 if (mode
== VOIDmode
)
4018 mode
= GET_MODE (op2
);
4020 icode
= movcc_gen_code
[mode
];
4022 if (icode
== CODE_FOR_nothing
)
4027 op2
= force_not_mem (op2
);
4028 op3
= force_not_mem (op3
);
4032 target
= protect_from_queue (target
, 1);
4034 target
= gen_reg_rtx (mode
);
4040 op2
= protect_from_queue (op2
, 0);
4041 op3
= protect_from_queue (op3
, 0);
4043 /* If the insn doesn't accept these operands, put them in pseudos. */
4045 if (! (*insn_data
[icode
].operand
[0].predicate
)
4046 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4047 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4049 if (! (*insn_data
[icode
].operand
[2].predicate
)
4050 (op2
, insn_data
[icode
].operand
[2].mode
))
4051 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4053 if (! (*insn_data
[icode
].operand
[3].predicate
)
4054 (op3
, insn_data
[icode
].operand
[3].mode
))
4055 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4057 /* Everything should now be in the suitable form, so emit the compare insn
4058 and then the conditional move. */
4061 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4063 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4064 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4065 return NULL and let the caller figure out how best to deal with this
4067 if (GET_CODE (comparison
) != code
)
4070 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4072 /* If that failed, then give up. */
4078 if (subtarget
!= target
)
4079 convert_move (target
, subtarget
, 0);
4084 /* Return non-zero if a conditional move of mode MODE is supported.
4086 This function is for combine so it can tell whether an insn that looks
4087 like a conditional move is actually supported by the hardware. If we
4088 guess wrong we lose a bit on optimization, but that's it. */
4089 /* ??? sparc64 supports conditionally moving integers values based on fp
4090 comparisons, and vice versa. How do we handle them? */
4093 can_conditionally_move_p (mode
)
4094 enum machine_mode mode
;
4096 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4102 #endif /* HAVE_conditional_move */
4104 /* These functions generate an insn body and return it
4105 rather than emitting the insn.
4107 They do not protect from queued increments,
4108 because they may be used 1) in protect_from_queue itself
4109 and 2) in other passes where there is no queue. */
4111 /* Generate and return an insn body to add Y to X. */
4114 gen_add2_insn (x
, y
)
4117 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4119 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4120 (x
, insn_data
[icode
].operand
[0].mode
))
4121 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4122 (x
, insn_data
[icode
].operand
[1].mode
))
4123 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4124 (y
, insn_data
[icode
].operand
[2].mode
)))
4127 return (GEN_FCN (icode
) (x
, x
, y
));
4130 /* Generate and return an insn body to add r1 and c,
4131 storing the result in r0. */
4133 gen_add3_insn (r0
, r1
, c
)
4136 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4138 if (icode
== CODE_FOR_nothing
4139 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4140 (r0
, insn_data
[icode
].operand
[0].mode
))
4141 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4142 (r1
, insn_data
[icode
].operand
[1].mode
))
4143 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4144 (c
, insn_data
[icode
].operand
[2].mode
)))
4147 return (GEN_FCN (icode
) (r0
, r1
, c
));
4151 have_add2_insn (x
, y
)
4156 if (GET_MODE (x
) == VOIDmode
)
4159 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4161 if (icode
== CODE_FOR_nothing
)
4164 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4165 (x
, insn_data
[icode
].operand
[0].mode
))
4166 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4167 (x
, insn_data
[icode
].operand
[1].mode
))
4168 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4169 (y
, insn_data
[icode
].operand
[2].mode
)))
4175 /* Generate and return an insn body to subtract Y from X. */
4178 gen_sub2_insn (x
, y
)
4181 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4183 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4184 (x
, insn_data
[icode
].operand
[0].mode
))
4185 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4186 (x
, insn_data
[icode
].operand
[1].mode
))
4187 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4188 (y
, insn_data
[icode
].operand
[2].mode
)))
4191 return (GEN_FCN (icode
) (x
, x
, y
));
4194 /* Generate and return an insn body to subtract r1 and c,
4195 storing the result in r0. */
4197 gen_sub3_insn (r0
, r1
, c
)
4200 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4202 if (icode
== CODE_FOR_nothing
4203 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4204 (r0
, insn_data
[icode
].operand
[0].mode
))
4205 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4206 (r1
, insn_data
[icode
].operand
[1].mode
))
4207 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4208 (c
, insn_data
[icode
].operand
[2].mode
)))
4211 return (GEN_FCN (icode
) (r0
, r1
, c
));
4215 have_sub2_insn (x
, y
)
4220 if (GET_MODE (x
) == VOIDmode
)
4223 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4225 if (icode
== CODE_FOR_nothing
)
4228 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4229 (x
, insn_data
[icode
].operand
[0].mode
))
4230 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4231 (x
, insn_data
[icode
].operand
[1].mode
))
4232 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4233 (y
, insn_data
[icode
].operand
[2].mode
)))
4239 /* Generate the body of an instruction to copy Y into X.
4240 It may be a list of insns, if one insn isn't enough. */
4243 gen_move_insn (x
, y
)
4246 enum machine_mode mode
= GET_MODE (x
);
4247 enum insn_code insn_code
;
4250 if (mode
== VOIDmode
)
4251 mode
= GET_MODE (y
);
4253 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
4255 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
4256 find a mode to do it in. If we have a movcc, use it. Otherwise,
4257 find the MODE_INT mode of the same width. */
4259 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
4261 enum machine_mode tmode
= VOIDmode
;
4265 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
4268 for (tmode
= QImode
; tmode
!= VOIDmode
;
4269 tmode
= GET_MODE_WIDER_MODE (tmode
))
4270 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
4273 if (tmode
== VOIDmode
)
4276 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
4277 may call change_address which is not appropriate if we were
4278 called when a reload was in progress. We don't have to worry
4279 about changing the address since the size in bytes is supposed to
4280 be the same. Copy the MEM to change the mode and move any
4281 substitutions from the old MEM to the new one. */
4283 if (reload_in_progress
)
4285 x
= gen_lowpart_common (tmode
, x1
);
4286 if (x
== 0 && GET_CODE (x1
) == MEM
)
4288 x
= adjust_address_nv (x1
, tmode
, 0);
4289 copy_replacements (x1
, x
);
4292 y
= gen_lowpart_common (tmode
, y1
);
4293 if (y
== 0 && GET_CODE (y1
) == MEM
)
4295 y
= adjust_address_nv (y1
, tmode
, 0);
4296 copy_replacements (y1
, y
);
4301 x
= gen_lowpart (tmode
, x
);
4302 y
= gen_lowpart (tmode
, y
);
4305 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
4306 return (GEN_FCN (insn_code
) (x
, y
));
4310 emit_move_insn_1 (x
, y
);
4316 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4317 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4318 no such operation exists, CODE_FOR_nothing will be returned. */
4321 can_extend_p (to_mode
, from_mode
, unsignedp
)
4322 enum machine_mode to_mode
, from_mode
;
4325 #ifdef HAVE_ptr_extend
4327 return CODE_FOR_ptr_extend
;
4330 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
!= 0];
4333 /* Generate the body of an insn to extend Y (with mode MFROM)
4334 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4337 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
4339 enum machine_mode mto
, mfrom
;
4342 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
!= 0]) (x
, y
));
4345 /* can_fix_p and can_float_p say whether the target machine
4346 can directly convert a given fixed point type to
4347 a given floating point type, or vice versa.
4348 The returned value is the CODE_FOR_... value to use,
4349 or CODE_FOR_nothing if these modes cannot be directly converted.
4351 *TRUNCP_PTR is set to 1 if it is necessary to output
4352 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4354 static enum insn_code
4355 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
4356 enum machine_mode fltmode
, fixmode
;
4361 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0]
4362 != CODE_FOR_nothing
)
4363 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4365 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
4368 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4370 return CODE_FOR_nothing
;
4373 static enum insn_code
4374 can_float_p (fltmode
, fixmode
, unsignedp
)
4375 enum machine_mode fixmode
, fltmode
;
4378 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4381 /* Generate code to convert FROM to floating point
4382 and store in TO. FROM must be fixed point and not VOIDmode.
4383 UNSIGNEDP nonzero means regard FROM as unsigned.
4384 Normally this is done by correcting the final value
4385 if it is negative. */
4388 expand_float (to
, from
, unsignedp
)
4392 enum insn_code icode
;
4394 enum machine_mode fmode
, imode
;
4396 /* Crash now, because we won't be able to decide which mode to use. */
4397 if (GET_MODE (from
) == VOIDmode
)
4400 /* Look for an insn to do the conversion. Do it in the specified
4401 modes if possible; otherwise convert either input, output or both to
4402 wider mode. If the integer mode is wider than the mode of FROM,
4403 we can do the conversion signed even if the input is unsigned. */
4405 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4406 imode
= GET_MODE_WIDER_MODE (imode
))
4407 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4408 fmode
= GET_MODE_WIDER_MODE (fmode
))
4410 int doing_unsigned
= unsignedp
;
4412 if (fmode
!= GET_MODE (to
)
4413 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4416 icode
= can_float_p (fmode
, imode
, unsignedp
);
4417 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4418 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4420 if (icode
!= CODE_FOR_nothing
)
4422 to
= protect_from_queue (to
, 1);
4423 from
= protect_from_queue (from
, 0);
4425 if (imode
!= GET_MODE (from
))
4426 from
= convert_to_mode (imode
, from
, unsignedp
);
4428 if (fmode
!= GET_MODE (to
))
4429 target
= gen_reg_rtx (fmode
);
4431 emit_unop_insn (icode
, target
, from
,
4432 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4435 convert_move (to
, target
, 0);
4440 /* Unsigned integer, and no way to convert directly.
4441 Convert as signed, then conditionally adjust the result. */
4444 rtx label
= gen_label_rtx ();
4446 REAL_VALUE_TYPE offset
;
4450 to
= protect_from_queue (to
, 1);
4451 from
= protect_from_queue (from
, 0);
4454 from
= force_not_mem (from
);
4456 /* Look for a usable floating mode FMODE wider than the source and at
4457 least as wide as the target. Using FMODE will avoid rounding woes
4458 with unsigned values greater than the signed maximum value. */
4460 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4461 fmode
= GET_MODE_WIDER_MODE (fmode
))
4462 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4463 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4466 if (fmode
== VOIDmode
)
4468 /* There is no such mode. Pretend the target is wide enough. */
4469 fmode
= GET_MODE (to
);
4471 /* Avoid double-rounding when TO is narrower than FROM. */
4472 if ((significand_size (fmode
) + 1)
4473 < GET_MODE_BITSIZE (GET_MODE (from
)))
4476 rtx neglabel
= gen_label_rtx ();
4478 /* Don't use TARGET if it isn't a register, is a hard register,
4479 or is the wrong mode. */
4480 if (GET_CODE (target
) != REG
4481 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4482 || GET_MODE (target
) != fmode
)
4483 target
= gen_reg_rtx (fmode
);
4485 imode
= GET_MODE (from
);
4486 do_pending_stack_adjust ();
4488 /* Test whether the sign bit is set. */
4489 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4492 /* The sign bit is not set. Convert as signed. */
4493 expand_float (target
, from
, 0);
4494 emit_jump_insn (gen_jump (label
));
4497 /* The sign bit is set.
4498 Convert to a usable (positive signed) value by shifting right
4499 one bit, while remembering if a nonzero bit was shifted
4500 out; i.e., compute (from & 1) | (from >> 1). */
4502 emit_label (neglabel
);
4503 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4504 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4505 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4507 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4509 expand_float (target
, temp
, 0);
4511 /* Multiply by 2 to undo the shift above. */
4512 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4513 target
, 0, OPTAB_LIB_WIDEN
);
4515 emit_move_insn (target
, temp
);
4517 do_pending_stack_adjust ();
4523 /* If we are about to do some arithmetic to correct for an
4524 unsigned operand, do it in a pseudo-register. */
4526 if (GET_MODE (to
) != fmode
4527 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4528 target
= gen_reg_rtx (fmode
);
4530 /* Convert as signed integer to floating. */
4531 expand_float (target
, from
, 0);
4533 /* If FROM is negative (and therefore TO is negative),
4534 correct its value by 2**bitwidth. */
4536 do_pending_stack_adjust ();
4537 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4540 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
4541 Rather than setting up a dconst_dot_5, let's hope SCO
4543 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
4544 temp
= expand_binop (fmode
, add_optab
, target
,
4545 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4546 target
, 0, OPTAB_LIB_WIDEN
);
4548 emit_move_insn (target
, temp
);
4550 do_pending_stack_adjust ();
4555 /* No hardware instruction available; call a library routine to convert from
4556 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4562 to
= protect_from_queue (to
, 1);
4563 from
= protect_from_queue (from
, 0);
4565 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4566 from
= convert_to_mode (SImode
, from
, unsignedp
);
4569 from
= force_not_mem (from
);
4571 if (GET_MODE (to
) == SFmode
)
4573 if (GET_MODE (from
) == SImode
)
4574 libfcn
= floatsisf_libfunc
;
4575 else if (GET_MODE (from
) == DImode
)
4576 libfcn
= floatdisf_libfunc
;
4577 else if (GET_MODE (from
) == TImode
)
4578 libfcn
= floattisf_libfunc
;
4582 else if (GET_MODE (to
) == DFmode
)
4584 if (GET_MODE (from
) == SImode
)
4585 libfcn
= floatsidf_libfunc
;
4586 else if (GET_MODE (from
) == DImode
)
4587 libfcn
= floatdidf_libfunc
;
4588 else if (GET_MODE (from
) == TImode
)
4589 libfcn
= floattidf_libfunc
;
4593 else if (GET_MODE (to
) == XFmode
)
4595 if (GET_MODE (from
) == SImode
)
4596 libfcn
= floatsixf_libfunc
;
4597 else if (GET_MODE (from
) == DImode
)
4598 libfcn
= floatdixf_libfunc
;
4599 else if (GET_MODE (from
) == TImode
)
4600 libfcn
= floattixf_libfunc
;
4604 else if (GET_MODE (to
) == TFmode
)
4606 if (GET_MODE (from
) == SImode
)
4607 libfcn
= floatsitf_libfunc
;
4608 else if (GET_MODE (from
) == DImode
)
4609 libfcn
= floatditf_libfunc
;
4610 else if (GET_MODE (from
) == TImode
)
4611 libfcn
= floattitf_libfunc
;
4620 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4621 GET_MODE (to
), 1, from
,
4623 insns
= get_insns ();
4626 emit_libcall_block (insns
, target
, value
,
4627 gen_rtx_FLOAT (GET_MODE (to
), from
));
4632 /* Copy result to requested destination
4633 if we have been computing in a temp location. */
4637 if (GET_MODE (target
) == GET_MODE (to
))
4638 emit_move_insn (to
, target
);
4640 convert_move (to
, target
, 0);
4644 /* expand_fix: generate code to convert FROM to fixed point
4645 and store in TO. FROM must be floating point. */
4651 rtx temp
= gen_reg_rtx (GET_MODE (x
));
4652 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
4656 expand_fix (to
, from
, unsignedp
)
4660 enum insn_code icode
;
4662 enum machine_mode fmode
, imode
;
4666 /* We first try to find a pair of modes, one real and one integer, at
4667 least as wide as FROM and TO, respectively, in which we can open-code
4668 this conversion. If the integer mode is wider than the mode of TO,
4669 we can do the conversion either signed or unsigned. */
4671 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4672 fmode
= GET_MODE_WIDER_MODE (fmode
))
4673 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4674 imode
= GET_MODE_WIDER_MODE (imode
))
4676 int doing_unsigned
= unsignedp
;
4678 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4679 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4680 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4682 if (icode
!= CODE_FOR_nothing
)
4684 to
= protect_from_queue (to
, 1);
4685 from
= protect_from_queue (from
, 0);
4687 if (fmode
!= GET_MODE (from
))
4688 from
= convert_to_mode (fmode
, from
, 0);
4691 from
= ftruncify (from
);
4693 if (imode
!= GET_MODE (to
))
4694 target
= gen_reg_rtx (imode
);
4696 emit_unop_insn (icode
, target
, from
,
4697 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4699 convert_move (to
, target
, unsignedp
);
4704 /* For an unsigned conversion, there is one more way to do it.
4705 If we have a signed conversion, we generate code that compares
4706 the real value to the largest representable positive number. If if
4707 is smaller, the conversion is done normally. Otherwise, subtract
4708 one plus the highest signed number, convert, and add it back.
4710 We only need to check all real modes, since we know we didn't find
4711 anything with a wider integer mode. */
4713 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4714 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4715 fmode
= GET_MODE_WIDER_MODE (fmode
))
4716 /* Make sure we won't lose significant bits doing this. */
4717 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
4718 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4722 REAL_VALUE_TYPE offset
;
4723 rtx limit
, lab1
, lab2
, insn
;
4725 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4726 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
4727 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4728 lab1
= gen_label_rtx ();
4729 lab2
= gen_label_rtx ();
4732 to
= protect_from_queue (to
, 1);
4733 from
= protect_from_queue (from
, 0);
4736 from
= force_not_mem (from
);
4738 if (fmode
!= GET_MODE (from
))
4739 from
= convert_to_mode (fmode
, from
, 0);
4741 /* See if we need to do the subtraction. */
4742 do_pending_stack_adjust ();
4743 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4746 /* If not, do the signed "fix" and branch around fixup code. */
4747 expand_fix (to
, from
, 0);
4748 emit_jump_insn (gen_jump (lab2
));
4751 /* Otherwise, subtract 2**(N-1), convert to signed number,
4752 then add 2**(N-1). Do the addition using XOR since this
4753 will often generate better code. */
4755 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4756 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4757 expand_fix (to
, target
, 0);
4758 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4760 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4762 to
, 1, OPTAB_LIB_WIDEN
);
4765 emit_move_insn (to
, target
);
4769 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4770 != CODE_FOR_nothing
)
4772 /* Make a place for a REG_NOTE and add it. */
4773 insn
= emit_move_insn (to
, to
);
4774 set_unique_reg_note (insn
,
4776 gen_rtx_fmt_e (UNSIGNED_FIX
,
4784 /* We can't do it with an insn, so use a library call. But first ensure
4785 that the mode of TO is at least as wide as SImode, since those are the
4786 only library calls we know about. */
4788 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4790 target
= gen_reg_rtx (SImode
);
4792 expand_fix (target
, from
, unsignedp
);
4794 else if (GET_MODE (from
) == SFmode
)
4796 if (GET_MODE (to
) == SImode
)
4797 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
4798 else if (GET_MODE (to
) == DImode
)
4799 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
4800 else if (GET_MODE (to
) == TImode
)
4801 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
4805 else if (GET_MODE (from
) == DFmode
)
4807 if (GET_MODE (to
) == SImode
)
4808 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
4809 else if (GET_MODE (to
) == DImode
)
4810 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
4811 else if (GET_MODE (to
) == TImode
)
4812 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
4816 else if (GET_MODE (from
) == XFmode
)
4818 if (GET_MODE (to
) == SImode
)
4819 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
4820 else if (GET_MODE (to
) == DImode
)
4821 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
4822 else if (GET_MODE (to
) == TImode
)
4823 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
4827 else if (GET_MODE (from
) == TFmode
)
4829 if (GET_MODE (to
) == SImode
)
4830 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
4831 else if (GET_MODE (to
) == DImode
)
4832 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
4833 else if (GET_MODE (to
) == TImode
)
4834 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
4846 to
= protect_from_queue (to
, 1);
4847 from
= protect_from_queue (from
, 0);
4850 from
= force_not_mem (from
);
4854 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4855 GET_MODE (to
), 1, from
,
4857 insns
= get_insns ();
4860 emit_libcall_block (insns
, target
, value
,
4861 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4862 GET_MODE (to
), from
));
4867 if (GET_MODE (to
) == GET_MODE (target
))
4868 emit_move_insn (to
, target
);
4870 convert_move (to
, target
, 0);
4874 /* Report whether we have an instruction to perform the operation
4875 specified by CODE on operands of mode MODE. */
4877 have_insn_for (code
, mode
)
4879 enum machine_mode mode
;
4881 return (code_to_optab
[(int) code
] != 0
4882 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4883 != CODE_FOR_nothing
));
4886 /* Create a blank optab. */
4891 optab op
= (optab
) ggc_alloc (sizeof (struct optab
));
4892 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4894 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4895 op
->handlers
[i
].libfunc
= 0;
4901 /* Same, but fill in its code as CODE, and write it into the
4902 code_to_optab table. */
4907 optab op
= new_optab ();
4909 code_to_optab
[(int) code
] = op
;
4913 /* Same, but fill in its code as CODE, and do _not_ write it into
4914 the code_to_optab table. */
4919 optab op
= new_optab ();
4924 /* Initialize the libfunc fields of an entire group of entries in some
4925 optab. Each entry is set equal to a string consisting of a leading
4926 pair of underscores followed by a generic operation name followed by
4927 a mode name (downshifted to lower case) followed by a single character
4928 representing the number of operands for the given operation (which is
4929 usually one of the characters '2', '3', or '4').
4931 OPTABLE is the table in which libfunc fields are to be initialized.
4932 FIRST_MODE is the first machine mode index in the given optab to
4934 LAST_MODE is the last machine mode index in the given optab to
4936 OPNAME is the generic (string) name of the operation.
4937 SUFFIX is the character which specifies the number of operands for
4938 the given generic operation.
4942 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
4950 unsigned opname_len
= strlen (opname
);
4952 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4953 mode
= (enum machine_mode
) ((int) mode
+ 1))
4955 const char *mname
= GET_MODE_NAME(mode
);
4956 unsigned mname_len
= strlen (mname
);
4957 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4964 for (q
= opname
; *q
; )
4966 for (q
= mname
; *q
; q
++)
4967 *p
++ = TOLOWER (*q
);
4971 optable
->handlers
[(int) mode
].libfunc
4972 = gen_rtx_SYMBOL_REF (Pmode
, ggc_alloc_string (libfunc_name
,
4977 /* Initialize the libfunc fields of an entire group of entries in some
4978 optab which correspond to all integer mode operations. The parameters
4979 have the same meaning as similarly named ones for the `init_libfuncs'
4980 routine. (See above). */
4983 init_integral_libfuncs (optable
, opname
, suffix
)
4988 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
4991 /* Initialize the libfunc fields of an entire group of entries in some
4992 optab which correspond to all real mode operations. The parameters
4993 have the same meaning as similarly named ones for the `init_libfuncs'
4994 routine. (See above). */
4997 init_floating_libfuncs (optable
, opname
, suffix
)
5002 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
5006 init_one_libfunc (name
)
5009 /* Create a FUNCTION_DECL that can be passed to
5010 targetm.encode_section_info. */
5011 /* ??? We don't have any type information except for this is
5012 a function. Pretend this is "int foo()". */
5013 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5014 build_function_type (integer_type_node
, NULL_TREE
));
5015 DECL_ARTIFICIAL (decl
) = 1;
5016 DECL_EXTERNAL (decl
) = 1;
5017 TREE_PUBLIC (decl
) = 1;
5019 /* Return the symbol_ref from the mem rtx. */
5020 return XEXP (DECL_RTL (decl
), 0);
5023 /* Call this once to initialize the contents of the optabs
5024 appropriately for the current target machine. */
5029 unsigned int i
, j
, k
;
5031 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5033 for (i
= 0; i
< ARRAY_SIZE (fixtab
); i
++)
5034 for (j
= 0; j
< ARRAY_SIZE (fixtab
[0]); j
++)
5035 for (k
= 0; k
< ARRAY_SIZE (fixtab
[0][0]); k
++)
5036 fixtab
[i
][j
][k
] = CODE_FOR_nothing
;
5038 for (i
= 0; i
< ARRAY_SIZE (fixtrunctab
); i
++)
5039 for (j
= 0; j
< ARRAY_SIZE (fixtrunctab
[0]); j
++)
5040 for (k
= 0; k
< ARRAY_SIZE (fixtrunctab
[0][0]); k
++)
5041 fixtrunctab
[i
][j
][k
] = CODE_FOR_nothing
;
5043 for (i
= 0; i
< ARRAY_SIZE (floattab
); i
++)
5044 for (j
= 0; j
< ARRAY_SIZE (floattab
[0]); j
++)
5045 for (k
= 0; k
< ARRAY_SIZE (floattab
[0][0]); k
++)
5046 floattab
[i
][j
][k
] = CODE_FOR_nothing
;
5048 for (i
= 0; i
< ARRAY_SIZE (extendtab
); i
++)
5049 for (j
= 0; j
< ARRAY_SIZE (extendtab
[0]); j
++)
5050 for (k
= 0; k
< ARRAY_SIZE (extendtab
[0][0]); k
++)
5051 extendtab
[i
][j
][k
] = CODE_FOR_nothing
;
5053 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5054 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5056 #ifdef HAVE_conditional_move
5057 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5058 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5061 add_optab
= init_optab (PLUS
);
5062 addv_optab
= init_optabv (PLUS
);
5063 sub_optab
= init_optab (MINUS
);
5064 subv_optab
= init_optabv (MINUS
);
5065 smul_optab
= init_optab (MULT
);
5066 smulv_optab
= init_optabv (MULT
);
5067 smul_highpart_optab
= init_optab (UNKNOWN
);
5068 umul_highpart_optab
= init_optab (UNKNOWN
);
5069 smul_widen_optab
= init_optab (UNKNOWN
);
5070 umul_widen_optab
= init_optab (UNKNOWN
);
5071 sdiv_optab
= init_optab (DIV
);
5072 sdivv_optab
= init_optabv (DIV
);
5073 sdivmod_optab
= init_optab (UNKNOWN
);
5074 udiv_optab
= init_optab (UDIV
);
5075 udivmod_optab
= init_optab (UNKNOWN
);
5076 smod_optab
= init_optab (MOD
);
5077 umod_optab
= init_optab (UMOD
);
5078 ftrunc_optab
= init_optab (UNKNOWN
);
5079 and_optab
= init_optab (AND
);
5080 ior_optab
= init_optab (IOR
);
5081 xor_optab
= init_optab (XOR
);
5082 ashl_optab
= init_optab (ASHIFT
);
5083 ashr_optab
= init_optab (ASHIFTRT
);
5084 lshr_optab
= init_optab (LSHIFTRT
);
5085 rotl_optab
= init_optab (ROTATE
);
5086 rotr_optab
= init_optab (ROTATERT
);
5087 smin_optab
= init_optab (SMIN
);
5088 smax_optab
= init_optab (SMAX
);
5089 umin_optab
= init_optab (UMIN
);
5090 umax_optab
= init_optab (UMAX
);
5092 /* These three have codes assigned exclusively for the sake of
5094 mov_optab
= init_optab (SET
);
5095 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5096 cmp_optab
= init_optab (COMPARE
);
5098 ucmp_optab
= init_optab (UNKNOWN
);
5099 tst_optab
= init_optab (UNKNOWN
);
5100 neg_optab
= init_optab (NEG
);
5101 negv_optab
= init_optabv (NEG
);
5102 abs_optab
= init_optab (ABS
);
5103 absv_optab
= init_optabv (ABS
);
5104 one_cmpl_optab
= init_optab (NOT
);
5105 ffs_optab
= init_optab (FFS
);
5106 sqrt_optab
= init_optab (SQRT
);
5107 sin_optab
= init_optab (UNKNOWN
);
5108 cos_optab
= init_optab (UNKNOWN
);
5109 strlen_optab
= init_optab (UNKNOWN
);
5110 cbranch_optab
= init_optab (UNKNOWN
);
5111 cmov_optab
= init_optab (UNKNOWN
);
5112 cstore_optab
= init_optab (UNKNOWN
);
5113 push_optab
= init_optab (UNKNOWN
);
5115 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5117 movstr_optab
[i
] = CODE_FOR_nothing
;
5118 clrstr_optab
[i
] = CODE_FOR_nothing
;
5120 #ifdef HAVE_SECONDARY_RELOADS
5121 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5125 /* Fill in the optabs with the insns we support. */
5128 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
5129 /* This flag says the same insns that convert to a signed fixnum
5130 also convert validly to an unsigned one. */
5131 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5132 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
5133 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
5136 /* Initialize the optabs with the names of the library functions. */
5137 init_integral_libfuncs (add_optab
, "add", '3');
5138 init_floating_libfuncs (add_optab
, "add", '3');
5139 init_integral_libfuncs (addv_optab
, "addv", '3');
5140 init_floating_libfuncs (addv_optab
, "add", '3');
5141 init_integral_libfuncs (sub_optab
, "sub", '3');
5142 init_floating_libfuncs (sub_optab
, "sub", '3');
5143 init_integral_libfuncs (subv_optab
, "subv", '3');
5144 init_floating_libfuncs (subv_optab
, "sub", '3');
5145 init_integral_libfuncs (smul_optab
, "mul", '3');
5146 init_floating_libfuncs (smul_optab
, "mul", '3');
5147 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5148 init_floating_libfuncs (smulv_optab
, "mul", '3');
5149 init_integral_libfuncs (sdiv_optab
, "div", '3');
5150 init_floating_libfuncs (sdiv_optab
, "div", '3');
5151 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5152 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5153 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5154 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5155 init_integral_libfuncs (smod_optab
, "mod", '3');
5156 init_integral_libfuncs (umod_optab
, "umod", '3');
5157 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5158 init_integral_libfuncs (and_optab
, "and", '3');
5159 init_integral_libfuncs (ior_optab
, "ior", '3');
5160 init_integral_libfuncs (xor_optab
, "xor", '3');
5161 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5162 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5163 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5164 init_integral_libfuncs (smin_optab
, "min", '3');
5165 init_floating_libfuncs (smin_optab
, "min", '3');
5166 init_integral_libfuncs (smax_optab
, "max", '3');
5167 init_floating_libfuncs (smax_optab
, "max", '3');
5168 init_integral_libfuncs (umin_optab
, "umin", '3');
5169 init_integral_libfuncs (umax_optab
, "umax", '3');
5170 init_integral_libfuncs (neg_optab
, "neg", '2');
5171 init_floating_libfuncs (neg_optab
, "neg", '2');
5172 init_integral_libfuncs (negv_optab
, "negv", '2');
5173 init_floating_libfuncs (negv_optab
, "neg", '2');
5174 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5175 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5177 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5178 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5179 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5180 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5182 #ifdef MULSI3_LIBCALL
5183 smul_optab
->handlers
[(int) SImode
].libfunc
5184 = init_one_libfunc (MULSI3_LIBCALL
);
5186 #ifdef MULDI3_LIBCALL
5187 smul_optab
->handlers
[(int) DImode
].libfunc
5188 = init_one_libfunc (MULDI3_LIBCALL
);
5191 #ifdef DIVSI3_LIBCALL
5192 sdiv_optab
->handlers
[(int) SImode
].libfunc
5193 = init_one_libfunc (DIVSI3_LIBCALL
);
5195 #ifdef DIVDI3_LIBCALL
5196 sdiv_optab
->handlers
[(int) DImode
].libfunc
5197 = init_one_libfunc (DIVDI3_LIBCALL
);
5200 #ifdef UDIVSI3_LIBCALL
5201 udiv_optab
->handlers
[(int) SImode
].libfunc
5202 = init_one_libfunc (UDIVSI3_LIBCALL
);
5204 #ifdef UDIVDI3_LIBCALL
5205 udiv_optab
->handlers
[(int) DImode
].libfunc
5206 = init_one_libfunc (UDIVDI3_LIBCALL
);
5209 #ifdef MODSI3_LIBCALL
5210 smod_optab
->handlers
[(int) SImode
].libfunc
5211 = init_one_libfunc (MODSI3_LIBCALL
);
5213 #ifdef MODDI3_LIBCALL
5214 smod_optab
->handlers
[(int) DImode
].libfunc
5215 = init_one_libfunc (MODDI3_LIBCALL
);
5218 #ifdef UMODSI3_LIBCALL
5219 umod_optab
->handlers
[(int) SImode
].libfunc
5220 = init_one_libfunc (UMODSI3_LIBCALL
);
5222 #ifdef UMODDI3_LIBCALL
5223 umod_optab
->handlers
[(int) DImode
].libfunc
5224 = init_one_libfunc (UMODDI3_LIBCALL
);
5227 /* Use cabs for DC complex abs, since systems generally have cabs.
5228 Don't define any libcall for SCmode, so that cabs will be used. */
5229 abs_optab
->handlers
[(int) DCmode
].libfunc
5230 = init_one_libfunc ("cabs");
5232 /* The ffs function operates on `int'. */
5233 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5234 = init_one_libfunc ("ffs");
5236 extendsfdf2_libfunc
= init_one_libfunc ("__extendsfdf2");
5237 extendsfxf2_libfunc
= init_one_libfunc ("__extendsfxf2");
5238 extendsftf2_libfunc
= init_one_libfunc ("__extendsftf2");
5239 extenddfxf2_libfunc
= init_one_libfunc ("__extenddfxf2");
5240 extenddftf2_libfunc
= init_one_libfunc ("__extenddftf2");
5242 truncdfsf2_libfunc
= init_one_libfunc ("__truncdfsf2");
5243 truncxfsf2_libfunc
= init_one_libfunc ("__truncxfsf2");
5244 trunctfsf2_libfunc
= init_one_libfunc ("__trunctfsf2");
5245 truncxfdf2_libfunc
= init_one_libfunc ("__truncxfdf2");
5246 trunctfdf2_libfunc
= init_one_libfunc ("__trunctfdf2");
5248 abort_libfunc
= init_one_libfunc ("abort");
5249 memcpy_libfunc
= init_one_libfunc ("memcpy");
5250 memmove_libfunc
= init_one_libfunc ("memmove");
5251 bcopy_libfunc
= init_one_libfunc ("bcopy");
5252 memcmp_libfunc
= init_one_libfunc ("memcmp");
5253 bcmp_libfunc
= init_one_libfunc ("__gcc_bcmp");
5254 memset_libfunc
= init_one_libfunc ("memset");
5255 bzero_libfunc
= init_one_libfunc ("bzero");
5257 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5258 ? "_Unwind_SjLj_Resume"
5259 : "_Unwind_Resume");
5260 #ifndef DONT_USE_BUILTIN_SETJMP
5261 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5262 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5264 setjmp_libfunc
= init_one_libfunc ("setjmp");
5265 longjmp_libfunc
= init_one_libfunc ("longjmp");
5267 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5268 unwind_sjlj_unregister_libfunc
5269 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5271 eqhf2_libfunc
= init_one_libfunc ("__eqhf2");
5272 nehf2_libfunc
= init_one_libfunc ("__nehf2");
5273 gthf2_libfunc
= init_one_libfunc ("__gthf2");
5274 gehf2_libfunc
= init_one_libfunc ("__gehf2");
5275 lthf2_libfunc
= init_one_libfunc ("__lthf2");
5276 lehf2_libfunc
= init_one_libfunc ("__lehf2");
5277 unordhf2_libfunc
= init_one_libfunc ("__unordhf2");
5279 eqsf2_libfunc
= init_one_libfunc ("__eqsf2");
5280 nesf2_libfunc
= init_one_libfunc ("__nesf2");
5281 gtsf2_libfunc
= init_one_libfunc ("__gtsf2");
5282 gesf2_libfunc
= init_one_libfunc ("__gesf2");
5283 ltsf2_libfunc
= init_one_libfunc ("__ltsf2");
5284 lesf2_libfunc
= init_one_libfunc ("__lesf2");
5285 unordsf2_libfunc
= init_one_libfunc ("__unordsf2");
5287 eqdf2_libfunc
= init_one_libfunc ("__eqdf2");
5288 nedf2_libfunc
= init_one_libfunc ("__nedf2");
5289 gtdf2_libfunc
= init_one_libfunc ("__gtdf2");
5290 gedf2_libfunc
= init_one_libfunc ("__gedf2");
5291 ltdf2_libfunc
= init_one_libfunc ("__ltdf2");
5292 ledf2_libfunc
= init_one_libfunc ("__ledf2");
5293 unorddf2_libfunc
= init_one_libfunc ("__unorddf2");
5295 eqxf2_libfunc
= init_one_libfunc ("__eqxf2");
5296 nexf2_libfunc
= init_one_libfunc ("__nexf2");
5297 gtxf2_libfunc
= init_one_libfunc ("__gtxf2");
5298 gexf2_libfunc
= init_one_libfunc ("__gexf2");
5299 ltxf2_libfunc
= init_one_libfunc ("__ltxf2");
5300 lexf2_libfunc
= init_one_libfunc ("__lexf2");
5301 unordxf2_libfunc
= init_one_libfunc ("__unordxf2");
5303 eqtf2_libfunc
= init_one_libfunc ("__eqtf2");
5304 netf2_libfunc
= init_one_libfunc ("__netf2");
5305 gttf2_libfunc
= init_one_libfunc ("__gttf2");
5306 getf2_libfunc
= init_one_libfunc ("__getf2");
5307 lttf2_libfunc
= init_one_libfunc ("__lttf2");
5308 letf2_libfunc
= init_one_libfunc ("__letf2");
5309 unordtf2_libfunc
= init_one_libfunc ("__unordtf2");
5311 floatsisf_libfunc
= init_one_libfunc ("__floatsisf");
5312 floatdisf_libfunc
= init_one_libfunc ("__floatdisf");
5313 floattisf_libfunc
= init_one_libfunc ("__floattisf");
5315 floatsidf_libfunc
= init_one_libfunc ("__floatsidf");
5316 floatdidf_libfunc
= init_one_libfunc ("__floatdidf");
5317 floattidf_libfunc
= init_one_libfunc ("__floattidf");
5319 floatsixf_libfunc
= init_one_libfunc ("__floatsixf");
5320 floatdixf_libfunc
= init_one_libfunc ("__floatdixf");
5321 floattixf_libfunc
= init_one_libfunc ("__floattixf");
5323 floatsitf_libfunc
= init_one_libfunc ("__floatsitf");
5324 floatditf_libfunc
= init_one_libfunc ("__floatditf");
5325 floattitf_libfunc
= init_one_libfunc ("__floattitf");
5327 fixsfsi_libfunc
= init_one_libfunc ("__fixsfsi");
5328 fixsfdi_libfunc
= init_one_libfunc ("__fixsfdi");
5329 fixsfti_libfunc
= init_one_libfunc ("__fixsfti");
5331 fixdfsi_libfunc
= init_one_libfunc ("__fixdfsi");
5332 fixdfdi_libfunc
= init_one_libfunc ("__fixdfdi");
5333 fixdfti_libfunc
= init_one_libfunc ("__fixdfti");
5335 fixxfsi_libfunc
= init_one_libfunc ("__fixxfsi");
5336 fixxfdi_libfunc
= init_one_libfunc ("__fixxfdi");
5337 fixxfti_libfunc
= init_one_libfunc ("__fixxfti");
5339 fixtfsi_libfunc
= init_one_libfunc ("__fixtfsi");
5340 fixtfdi_libfunc
= init_one_libfunc ("__fixtfdi");
5341 fixtfti_libfunc
= init_one_libfunc ("__fixtfti");
5343 fixunssfsi_libfunc
= init_one_libfunc ("__fixunssfsi");
5344 fixunssfdi_libfunc
= init_one_libfunc ("__fixunssfdi");
5345 fixunssfti_libfunc
= init_one_libfunc ("__fixunssfti");
5347 fixunsdfsi_libfunc
= init_one_libfunc ("__fixunsdfsi");
5348 fixunsdfdi_libfunc
= init_one_libfunc ("__fixunsdfdi");
5349 fixunsdfti_libfunc
= init_one_libfunc ("__fixunsdfti");
5351 fixunsxfsi_libfunc
= init_one_libfunc ("__fixunsxfsi");
5352 fixunsxfdi_libfunc
= init_one_libfunc ("__fixunsxfdi");
5353 fixunsxfti_libfunc
= init_one_libfunc ("__fixunsxfti");
5355 fixunstfsi_libfunc
= init_one_libfunc ("__fixunstfsi");
5356 fixunstfdi_libfunc
= init_one_libfunc ("__fixunstfdi");
5357 fixunstfti_libfunc
= init_one_libfunc ("__fixunstfti");
5359 /* For function entry/exit instrumentation. */
5360 profile_function_entry_libfunc
5361 = init_one_libfunc ("__cyg_profile_func_enter");
5362 profile_function_exit_libfunc
5363 = init_one_libfunc ("__cyg_profile_func_exit");
5365 #ifdef HAVE_conditional_trap
5369 #ifdef INIT_TARGET_OPTABS
5370 /* Allow the target to add more libcalls or rename some, etc. */
5375 static GTY(()) rtx trap_rtx
;
5377 #ifdef HAVE_conditional_trap
5378 /* The insn generating function can not take an rtx_code argument.
5379 TRAP_RTX is used as an rtx argument. Its code is replaced with
5380 the code to be used in the trap insn and all other fields are
5386 if (HAVE_conditional_trap
)
5388 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5393 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5394 CODE. Return 0 on failure. */
5397 gen_cond_trap (code
, op1
, op2
, tcode
)
5398 enum rtx_code code ATTRIBUTE_UNUSED
;
5399 rtx op1
, op2 ATTRIBUTE_UNUSED
, tcode ATTRIBUTE_UNUSED
;
5401 enum machine_mode mode
= GET_MODE (op1
);
5403 if (mode
== VOIDmode
)
5406 #ifdef HAVE_conditional_trap
5407 if (HAVE_conditional_trap
5408 && cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
5412 emit_insn (GEN_FCN (cmp_optab
->handlers
[(int) mode
].insn_code
) (op1
, op2
));
5413 PUT_CODE (trap_rtx
, code
);
5414 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5418 insn
= get_insns ();
5428 #include "gt-optabs.h"