1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
27 /* Include insn-config.h before expr.h so that HAVE_conditional_move
28 is properly defined. */
29 #include "insn-config.h"
44 /* Each optab contains info on how this target machine
45 can perform a particular operation
46 for all sizes and kinds of operands.
48 The operation to be performed is often specified
49 by passing one of these optabs as an argument.
51 See expr.h for documentation of these optabs. */
/* NOTE(review): this file was recovered from a garbled extraction; the
   hard line breaks below are artifacts and several original lines are
   missing.  All tokens are preserved verbatim.  */
/* Master table of operation tables, one entry per optab index.  */
53 optab optab_table
[OTI_MAX
];
/* Table of library-call rtxes, one per libfunc index — presumably used
   when no insn pattern exists; confirm against expr.h.  */
55 rtx libfunc_table
[LTI_MAX
];
57 /* Tables of patterns for extending one integer mode to another. */
/* Indexed [from-mode][to-mode][unsignedp].  */
58 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
60 /* Tables of patterns for converting between fixed and floating point. */
61 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
62 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
63 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab
[NUM_RTX_CODE
+ 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* Forward declarations of the static helpers defined later in this file.
   PARAMS is the old pre-ISO prototype macro used throughout GCC 3.x.
   NOTE(review): the extraction dropped lines here — the `can_fix_p' and
   `can_float_p' prototypes are missing their trailing parameters and
   closing parentheses, and the #endif matching HAVE_conditional_trap
   (original line 116) is absent.  Tokens are preserved verbatim.  */
88 static int add_equal_note
PARAMS ((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
89 static rtx widen_operand
PARAMS ((rtx
, enum machine_mode
,
90 enum machine_mode
, int, int));
91 static int expand_cmplxdiv_straight
PARAMS ((rtx
, rtx
, rtx
, rtx
,
92 rtx
, rtx
, enum machine_mode
,
93 int, enum optab_methods
,
94 enum mode_class
, optab
));
95 static int expand_cmplxdiv_wide
PARAMS ((rtx
, rtx
, rtx
, rtx
,
96 rtx
, rtx
, enum machine_mode
,
97 int, enum optab_methods
,
98 enum mode_class
, optab
));
99 static void prepare_cmp_insn
PARAMS ((rtx
*, rtx
*, enum rtx_code
*, rtx
,
100 enum machine_mode
*, int *,
101 enum can_compare_purpose
));
/* NOTE(review): truncated prototype — remainder lost in extraction.  */
102 static enum insn_code can_fix_p
PARAMS ((enum machine_mode
, enum machine_mode
,
/* NOTE(review): truncated prototype — remainder lost in extraction.  */
104 static enum insn_code can_float_p
PARAMS ((enum machine_mode
,
107 static rtx ftruncify
PARAMS ((rtx
));
108 static optab new_optab
PARAMS ((void));
109 static inline optab init_optab
PARAMS ((enum rtx_code
));
110 static inline optab init_optabv
PARAMS ((enum rtx_code
));
111 static void init_libfuncs
PARAMS ((optab
, int, int, const char *, int));
112 static void init_integral_libfuncs
PARAMS ((optab
, const char *, int));
113 static void init_floating_libfuncs
PARAMS ((optab
, const char *, int));
114 #ifdef HAVE_conditional_trap
115 static void init_traps
PARAMS ((void));
117 static void emit_cmp_and_jump_insn_1
PARAMS ((rtx
, rtx
, enum machine_mode
,
118 enum rtx_code
, int, rtx
));
119 static void prepare_float_lib_cmp
PARAMS ((rtx
*, rtx
*, enum rtx_code
*,
120 enum machine_mode
*, int *));
122 /* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
123 the result of operation CODE applied to OP0 (and OP1 if it is a binary
126 If the last insn does not set TARGET, don't do anything, but return 1.
128 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
129 don't add the REG_EQUAL note but return 0. Our caller can then try
130 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): garbled extraction — the K&R parameter declarations,
   local declarations (set/note/i) and the return statements were
   dropped.  Tokens below are preserved verbatim.  */
133 add_equal_note (seq
, target
, code
, op0
, op1
)
/* Bail out (per the head comment, returning 1) unless CODE is a unary,
   binary, commutative or comparison rtx class, SEQ is a SEQUENCE whose
   last insn is a single_set of TARGET, and TARGET is note-able.  */
143 if ((GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
144 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
145 || GET_CODE (seq
) != SEQUENCE
146 || (set
= single_set (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))) == 0
147 || GET_CODE (target
) == ZERO_EXTRACT
148 || (! rtx_equal_p (SET_DEST (set
), target
)
149 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
151 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
152 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
156 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
157 besides the last insn. */
158 if (reg_overlap_mentioned_p (target
, op0
)
159 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan every insn of SEQ except the last one.  */
160 for (i
= XVECLEN (seq
, 0) - 2; i
>= 0; i
--)
161 if (reg_set_p (target
, XVECEXP (seq
, 0, i
)))
/* Build the note: unary rtx for class '1', binary rtx otherwise.  */
164 if (GET_RTX_CLASS (code
) == '1')
165 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
167 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
/* Attach the REG_EQUAL note to the last insn of SEQ.  */
169 set_unique_reg_note (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1), REG_EQUAL
, note
);
174 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
175 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
176 not actually do a sign-extend or zero-extend, but can leave the
177 higher-order bits of the result rtx undefined, for example, in the case
178 of logical operations, but not right shifts. */
/* NOTE(review): garbled extraction — remaining K&R parameter
   declarations, the leading `if' condition at original line 192, and
   the final return were dropped.  Tokens below are preserved verbatim.  */
181 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
183 enum machine_mode mode
, oldmode
;
189 /* If we must extend do so. If OP is either a constant or a SUBREG
190 for a promoted object, also extend since it will be more efficient to
193 || GET_MODE (op
) == VOIDmode
194 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)))
195 return convert_modes (mode
, oldmode
, op
, unsignedp
);
197 /* If MODE is no wider than a single word, we return a paradoxical
199 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
/* Paradoxical SUBREG: high-order bits left undefined, as the head
   comment permits when NO_EXTEND applies.  */
200 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
202 /* Otherwise, get an object of MODE, clobber it, and set the low-order
205 result
= gen_reg_rtx (mode
);
206 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
207 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
211 /* Generate code to perform a straightforward complex divide. */
/* Divides (real0 + i*imag0) by (real1 + i*imag1), storing the result in
   REALR/IMAGR, using the textbook formula with divisor c*c + d*d.
   NOTE(review): garbled extraction — the return type, several local
   declarations, braces, null-checks' bodies and return statements were
   dropped.  Tokens below are preserved verbatim.  */
214 expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
215 unsignedp
, methods
, class, binoptab
)
216 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
217 enum machine_mode submode
;
219 enum optab_methods methods
;
220 enum mode_class
class;
227 optab this_add_optab
= add_optab
;
228 optab this_sub_optab
= sub_optab
;
229 optab this_neg_optab
= neg_optab
;
230 optab this_mul_optab
= smul_optab
;
/* For trap-on-overflow division, presumably switch every helper optab
   to its -v (overflow-checking) variant — confirm against optabs.h.  */
232 if (binoptab
== sdivv_optab
)
234 this_add_optab
= addv_optab
;
235 this_sub_optab
= subv_optab
;
236 this_neg_optab
= negv_optab
;
237 this_mul_optab
= smulv_optab
;
240 /* Don't fetch these from memory more than once. */
241 real0
= force_reg (submode
, real0
);
242 real1
= force_reg (submode
, real1
);
245 imag0
= force_reg (submode
, imag0
);
247 imag1
= force_reg (submode
, imag1
);
249 /* Divisor: c*c + d*d. */
250 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
251 NULL_RTX
, unsignedp
, methods
);
253 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
254 NULL_RTX
, unsignedp
, methods
);
/* Each expand_binop/expand_unop below may fail and return 0; the
   original bailed out here (body lost in extraction).  */
256 if (temp1
== 0 || temp2
== 0)
259 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
260 NULL_RTX
, unsignedp
, methods
);
266 /* Mathematically, ((a)(c-id))/divisor. */
267 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
269 /* Calculate the dividend. */
270 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
271 NULL_RTX
, unsignedp
, methods
);
273 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
274 NULL_RTX
, unsignedp
, methods
);
276 if (real_t
== 0 || imag_t
== 0)
279 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
280 NULL_RTX
, unsignedp
);
284 /* Mathematically, ((a+ib)(c-id))/divider. */
285 /* Calculate the dividend. */
286 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
287 NULL_RTX
, unsignedp
, methods
);
289 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
290 NULL_RTX
, unsignedp
, methods
);
292 if (temp1
== 0 || temp2
== 0)
295 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
296 NULL_RTX
, unsignedp
, methods
);
298 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
299 NULL_RTX
, unsignedp
, methods
);
301 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
302 NULL_RTX
, unsignedp
, methods
);
304 if (temp1
== 0 || temp2
== 0)
307 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
308 NULL_RTX
, unsignedp
, methods
);
310 if (real_t
== 0 || imag_t
== 0)
/* Real part: floating modes use the division optab directly; other
   (integral complex) modes go through expand_divmod.  */
314 if (class == MODE_COMPLEX_FLOAT
)
315 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
316 realr
, unsignedp
, methods
);
318 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
319 real_t
, divisor
, realr
, unsignedp
);
325 emit_move_insn (realr
, res
);
/* Imaginary part, same split as the real part above.  */
327 if (class == MODE_COMPLEX_FLOAT
)
328 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
329 imagr
, unsignedp
, methods
);
331 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
332 imag_t
, divisor
, imagr
, unsignedp
);
338 emit_move_insn (imagr
, res
);
343 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Like expand_cmplxdiv_straight, but scales numerator and denominator by
   the ratio of the smaller to the larger component of the divisor to
   avoid overflow/underflow of c*c + d*d (Smith's algorithm shape —
   confirm against the literature).  Emits a runtime |c| vs |d|
   comparison and two alternative code paths joined at lab2.
   NOTE(review): garbled extraction — return type, some local/param
   declarations, braces, failure-path bodies, label emissions and
   returns were dropped.  Tokens below are preserved verbatim.  */
346 expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
347 unsignedp
, methods
, class, binoptab
)
348 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
349 enum machine_mode submode
;
351 enum optab_methods methods
;
352 enum mode_class
class;
357 rtx temp1
, temp2
, lab1
, lab2
;
358 enum machine_mode mode
;
360 optab this_add_optab
= add_optab
;
361 optab this_sub_optab
= sub_optab
;
362 optab this_neg_optab
= neg_optab
;
363 optab this_mul_optab
= smul_optab
;
/* For trap-on-overflow division, presumably switch every helper optab
   to its -v (overflow-checking) variant — confirm against optabs.h.  */
365 if (binoptab
== sdivv_optab
)
367 this_add_optab
= addv_optab
;
368 this_sub_optab
= subv_optab
;
369 this_neg_optab
= negv_optab
;
370 this_mul_optab
= smulv_optab
;
373 /* Don't fetch these from memory more than once. */
374 real0
= force_reg (submode
, real0
);
375 real1
= force_reg (submode
, real1
);
378 imag0
= force_reg (submode
, imag0
);
380 imag1
= force_reg (submode
, imag1
);
382 /* XXX What's an "unsigned" complex number? */
/* Compare |real1| and |imag1| to pick the numerically safer scaling.  */
390 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
391 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
394 if (temp1
== 0 || temp2
== 0)
397 mode
= GET_MODE (temp1
);
398 lab1
= gen_label_rtx ();
/* Branch to lab1 when |c| < |d|; fall through for |c| >= |d|.  */
399 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
400 mode
, unsignedp
, lab1
);
402 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
404 if (class == MODE_COMPLEX_FLOAT
)
405 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
406 NULL_RTX
, unsignedp
, methods
);
408 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
409 imag1
, real1
, NULL_RTX
, unsignedp
);
414 /* Calculate divisor. */
416 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
417 NULL_RTX
, unsignedp
, methods
);
422 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
423 NULL_RTX
, unsignedp
, methods
);
428 /* Calculate dividend. */
434 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
436 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
437 NULL_RTX
, unsignedp
, methods
);
442 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
443 NULL_RTX
, unsignedp
);
445 if (real_t
== 0 || imag_t
== 0)
450 /* Compute (a+ib)/(c+id) as
451 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
453 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
454 NULL_RTX
, unsignedp
, methods
);
459 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
460 NULL_RTX
, unsignedp
, methods
);
462 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
463 NULL_RTX
, unsignedp
, methods
);
468 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
469 NULL_RTX
, unsignedp
, methods
);
471 if (real_t
== 0 || imag_t
== 0)
475 if (class == MODE_COMPLEX_FLOAT
)
476 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
477 realr
, unsignedp
, methods
);
479 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
480 real_t
, divisor
, realr
, unsignedp
);
486 emit_move_insn (realr
, res
);
488 if (class == MODE_COMPLEX_FLOAT
)
489 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
490 imagr
, unsignedp
, methods
);
492 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
493 imag_t
, divisor
, imagr
, unsignedp
);
499 emit_move_insn (imagr
, res
);
/* End of the |c| >= |d| path: jump over the alternative path.  */
501 lab2
= gen_label_rtx ();
502 emit_jump_insn (gen_jump (lab2
));
507 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
509 if (class == MODE_COMPLEX_FLOAT
)
510 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
511 NULL_RTX
, unsignedp
, methods
);
513 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
514 real1
, imag1
, NULL_RTX
, unsignedp
);
519 /* Calculate divisor. */
521 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
522 NULL_RTX
, unsignedp
, methods
);
527 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
528 NULL_RTX
, unsignedp
, methods
);
533 /* Calculate dividend. */
537 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
539 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
540 NULL_RTX
, unsignedp
, methods
);
542 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
543 NULL_RTX
, unsignedp
);
545 if (real_t
== 0 || imag_t
== 0)
550 /* Compute (a+ib)/(c+id) as
551 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
553 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
554 NULL_RTX
, unsignedp
, methods
);
559 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
560 NULL_RTX
, unsignedp
, methods
);
562 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
563 NULL_RTX
, unsignedp
, methods
);
568 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
569 NULL_RTX
, unsignedp
, methods
);
571 if (real_t
== 0 || imag_t
== 0)
575 if (class == MODE_COMPLEX_FLOAT
)
576 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
577 realr
, unsignedp
, methods
);
579 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
580 real_t
, divisor
, realr
, unsignedp
);
586 emit_move_insn (realr
, res
);
588 if (class == MODE_COMPLEX_FLOAT
)
589 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
590 imagr
, unsignedp
, methods
);
592 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
593 imag_t
, divisor
, imagr
, unsignedp
);
599 emit_move_insn (imagr
, res
);
606 /* Wrapper around expand_binop which takes an rtx code to specify
607 the operation to perform, not an optab pointer. All other
608 arguments are the same. */
/* NOTE(review): garbled extraction — the return type and most K&R
   parameter declarations were dropped.  Tokens preserved verbatim.  */
610 expand_simple_binop (mode
, code
, op0
, op1
, target
, unsignedp
, methods
)
611 enum machine_mode mode
;
616 enum optab_methods methods
;
/* Map the rtx code to its optab via the global code_to_optab table.  */
618 optab binop
= code_to_optab
[(int) code
];
622 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
625 /* Generate code to perform an operation specified by BINOPTAB
626 on operands OP0 and OP1, with result having machine-mode MODE.
628 UNSIGNEDP is for the case where we have to widen the operands
629 to perform the operation. It says to use zero-extension.
631 If TARGET is nonzero, the value
632 is generated there, if it is convenient to do so.
633 In all cases an rtx is returned for the locus of the value;
634 this may or may not be TARGET. */
637 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
638 enum machine_mode mode
;
643 enum optab_methods methods
;
645 enum optab_methods next_methods
646 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
647 ? OPTAB_WIDEN
: methods
);
648 enum mode_class
class;
649 enum machine_mode wider_mode
;
651 int commutative_op
= 0;
652 int shift_op
= (binoptab
->code
== ASHIFT
653 || binoptab
->code
== ASHIFTRT
654 || binoptab
->code
== LSHIFTRT
655 || binoptab
->code
== ROTATE
656 || binoptab
->code
== ROTATERT
);
657 rtx entry_last
= get_last_insn ();
660 class = GET_MODE_CLASS (mode
);
662 op0
= protect_from_queue (op0
, 0);
663 op1
= protect_from_queue (op1
, 0);
665 target
= protect_from_queue (target
, 1);
669 op0
= force_not_mem (op0
);
670 op1
= force_not_mem (op1
);
673 /* If subtracting an integer constant, convert this into an addition of
674 the negated constant. */
676 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
678 op1
= negate_rtx (mode
, op1
);
679 binoptab
= add_optab
;
682 /* If we are inside an appropriately-short loop and one operand is an
683 expensive constant, force it into a register. */
684 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
685 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
686 op0
= force_reg (mode
, op0
);
688 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
689 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
690 op1
= force_reg (mode
, op1
);
692 /* Record where to delete back to if we backtrack. */
693 last
= get_last_insn ();
695 /* If operation is commutative,
696 try to make the first operand a register.
697 Even better, try to make it the same as the target.
698 Also try to make the last operand a constant. */
699 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
700 || binoptab
== smul_widen_optab
701 || binoptab
== umul_widen_optab
702 || binoptab
== smul_highpart_optab
703 || binoptab
== umul_highpart_optab
)
707 if (((target
== 0 || GET_CODE (target
) == REG
)
708 ? ((GET_CODE (op1
) == REG
709 && GET_CODE (op0
) != REG
)
711 : rtx_equal_p (op1
, target
))
712 || GET_CODE (op0
) == CONST_INT
)
720 /* If we can do it with a three-operand insn, do so. */
722 if (methods
!= OPTAB_MUST_WIDEN
723 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
725 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
726 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
727 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
729 rtx xop0
= op0
, xop1
= op1
;
734 temp
= gen_reg_rtx (mode
);
736 /* If it is a commutative operator and the modes would match
737 if we would swap the operands, we can save the conversions. */
740 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
741 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
745 tmp
= op0
; op0
= op1
; op1
= tmp
;
746 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
750 /* In case the insn wants input operands in modes different from
751 the result, convert the operands. It would seem that we
752 don't need to convert CONST_INTs, but we do, so that they're
753 a properly sign-extended for their modes; we choose the
754 widest mode between mode and mode[01], so that, in a widening
755 operation, we call convert_modes with different FROM and TO
756 modes, which ensures the value is sign-extended. Shift
757 operations are an exception, because the second operand needs
758 not be extended to the mode of the result. */
760 if (GET_MODE (op0
) != mode0
761 && mode0
!= VOIDmode
)
762 xop0
= convert_modes (mode0
,
763 GET_MODE (op0
) != VOIDmode
765 : GET_MODE_SIZE (mode
) > GET_MODE_SIZE (mode0
)
770 if (GET_MODE (xop1
) != mode1
771 && mode1
!= VOIDmode
)
772 xop1
= convert_modes (mode1
,
773 GET_MODE (op1
) != VOIDmode
775 : (GET_MODE_SIZE (mode
) > GET_MODE_SIZE (mode1
)
781 /* Now, if insn's predicates don't allow our operands, put them into
784 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
785 && mode0
!= VOIDmode
)
786 xop0
= copy_to_mode_reg (mode0
, xop0
);
788 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
789 && mode1
!= VOIDmode
)
790 xop1
= copy_to_mode_reg (mode1
, xop1
);
792 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
793 temp
= gen_reg_rtx (mode
);
795 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
798 /* If PAT is a multi-insn sequence, try to add an appropriate
799 REG_EQUAL note to it. If we can't because TEMP conflicts with an
800 operand, call ourselves again, this time without a target. */
801 if (GET_CODE (pat
) == SEQUENCE
802 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
804 delete_insns_since (last
);
805 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
813 delete_insns_since (last
);
816 /* If this is a multiply, see if we can do a widening operation that
817 takes operands of this mode and makes a wider mode. */
819 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
820 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
821 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
822 != CODE_FOR_nothing
))
824 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
825 unsignedp
? umul_widen_optab
: smul_widen_optab
,
826 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
830 if (GET_MODE_CLASS (mode
) == MODE_INT
)
831 return gen_lowpart (mode
, temp
);
833 return convert_to_mode (mode
, temp
, unsignedp
);
837 /* Look for a wider mode of the same class for which we think we
838 can open-code the operation. Check for a widening multiply at the
839 wider mode as well. */
841 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
842 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
843 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
844 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
846 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
847 || (binoptab
== smul_optab
848 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
849 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
850 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
851 != CODE_FOR_nothing
)))
853 rtx xop0
= op0
, xop1
= op1
;
856 /* For certain integer operations, we need not actually extend
857 the narrow operands, as long as we will truncate
858 the results to the same narrowness. */
860 if ((binoptab
== ior_optab
|| binoptab
== and_optab
861 || binoptab
== xor_optab
862 || binoptab
== add_optab
|| binoptab
== sub_optab
863 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
864 && class == MODE_INT
)
867 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
869 /* The second operand of a shift must always be extended. */
870 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
871 no_extend
&& binoptab
!= ashl_optab
);
873 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
874 unsignedp
, OPTAB_DIRECT
);
877 if (class != MODE_INT
)
880 target
= gen_reg_rtx (mode
);
881 convert_move (target
, temp
, 0);
885 return gen_lowpart (mode
, temp
);
888 delete_insns_since (last
);
892 /* These can be done a word at a time. */
893 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
895 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
896 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
902 /* If TARGET is the same as one of the operands, the REG_EQUAL note
903 won't be accurate, so use a new target. */
904 if (target
== 0 || target
== op0
|| target
== op1
)
905 target
= gen_reg_rtx (mode
);
909 /* Do the actual arithmetic. */
910 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
912 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
913 rtx x
= expand_binop (word_mode
, binoptab
,
914 operand_subword_force (op0
, i
, mode
),
915 operand_subword_force (op1
, i
, mode
),
916 target_piece
, unsignedp
, next_methods
);
921 if (target_piece
!= x
)
922 emit_move_insn (target_piece
, x
);
925 insns
= get_insns ();
928 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
930 if (binoptab
->code
!= UNKNOWN
)
932 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
933 copy_rtx (op0
), copy_rtx (op1
));
937 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
942 /* Synthesize double word shifts from single word shifts. */
943 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
944 || binoptab
== ashr_optab
)
946 && GET_CODE (op1
) == CONST_INT
947 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
948 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
949 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
950 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
952 rtx insns
, inter
, equiv_value
;
953 rtx into_target
, outof_target
;
954 rtx into_input
, outof_input
;
955 int shift_count
, left_shift
, outof_word
;
957 /* If TARGET is the same as one of the operands, the REG_EQUAL note
958 won't be accurate, so use a new target. */
959 if (target
== 0 || target
== op0
|| target
== op1
)
960 target
= gen_reg_rtx (mode
);
964 shift_count
= INTVAL (op1
);
966 /* OUTOF_* is the word we are shifting bits away from, and
967 INTO_* is the word that we are shifting bits towards, thus
968 they differ depending on the direction of the shift and
971 left_shift
= binoptab
== ashl_optab
;
972 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
974 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
975 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
977 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
978 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
980 if (shift_count
>= BITS_PER_WORD
)
982 inter
= expand_binop (word_mode
, binoptab
,
984 GEN_INT (shift_count
- BITS_PER_WORD
),
985 into_target
, unsignedp
, next_methods
);
987 if (inter
!= 0 && inter
!= into_target
)
988 emit_move_insn (into_target
, inter
);
990 /* For a signed right shift, we must fill the word we are shifting
991 out of with copies of the sign bit. Otherwise it is zeroed. */
992 if (inter
!= 0 && binoptab
!= ashr_optab
)
993 inter
= CONST0_RTX (word_mode
);
995 inter
= expand_binop (word_mode
, binoptab
,
997 GEN_INT (BITS_PER_WORD
- 1),
998 outof_target
, unsignedp
, next_methods
);
1000 if (inter
!= 0 && inter
!= outof_target
)
1001 emit_move_insn (outof_target
, inter
);
1006 optab reverse_unsigned_shift
, unsigned_shift
;
1008 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1009 we must do a logical shift in the opposite direction of the
1012 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1014 /* For a shift of less than BITS_PER_WORD, to compute the word
1015 shifted towards, we need to unsigned shift the orig value of
1018 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1020 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1022 GEN_INT (BITS_PER_WORD
- shift_count
),
1023 0, unsignedp
, next_methods
);
1028 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1029 op1
, 0, unsignedp
, next_methods
);
1032 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1033 into_target
, unsignedp
, next_methods
);
1035 if (inter
!= 0 && inter
!= into_target
)
1036 emit_move_insn (into_target
, inter
);
1039 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1040 op1
, outof_target
, unsignedp
, next_methods
);
1042 if (inter
!= 0 && inter
!= outof_target
)
1043 emit_move_insn (outof_target
, inter
);
1046 insns
= get_insns ();
1051 if (binoptab
->code
!= UNKNOWN
)
1052 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1056 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1061 /* Synthesize double word rotates from single word shifts. */
1062 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1063 && class == MODE_INT
1064 && GET_CODE (op1
) == CONST_INT
1065 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1066 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1067 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1069 rtx insns
, equiv_value
;
1070 rtx into_target
, outof_target
;
1071 rtx into_input
, outof_input
;
1073 int shift_count
, left_shift
, outof_word
;
1075 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1076 won't be accurate, so use a new target. */
1077 if (target
== 0 || target
== op0
|| target
== op1
)
1078 target
= gen_reg_rtx (mode
);
1082 shift_count
= INTVAL (op1
);
1084 /* OUTOF_* is the word we are shifting bits away from, and
1085 INTO_* is the word that we are shifting bits towards, thus
1086 they differ depending on the direction of the shift and
1087 WORDS_BIG_ENDIAN. */
1089 left_shift
= (binoptab
== rotl_optab
);
1090 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1092 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1093 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1095 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1096 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1098 if (shift_count
== BITS_PER_WORD
)
1100 /* This is just a word swap. */
1101 emit_move_insn (outof_target
, into_input
);
1102 emit_move_insn (into_target
, outof_input
);
1107 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1108 rtx first_shift_count
, second_shift_count
;
1109 optab reverse_unsigned_shift
, unsigned_shift
;
1111 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1112 ? lshr_optab
: ashl_optab
);
1114 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1115 ? ashl_optab
: lshr_optab
);
1117 if (shift_count
> BITS_PER_WORD
)
1119 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1120 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
1124 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1125 second_shift_count
= GEN_INT (shift_count
);
1128 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1129 outof_input
, first_shift_count
,
1130 NULL_RTX
, unsignedp
, next_methods
);
1131 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1132 into_input
, second_shift_count
,
1133 into_target
, unsignedp
, next_methods
);
1135 if (into_temp1
!= 0 && into_temp2
!= 0)
1136 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1137 into_target
, unsignedp
, next_methods
);
1141 if (inter
!= 0 && inter
!= into_target
)
1142 emit_move_insn (into_target
, inter
);
1144 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1145 into_input
, first_shift_count
,
1146 NULL_RTX
, unsignedp
, next_methods
);
1147 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1148 outof_input
, second_shift_count
,
1149 outof_target
, unsignedp
, next_methods
);
1151 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1152 inter
= expand_binop (word_mode
, ior_optab
,
1153 outof_temp1
, outof_temp2
,
1154 outof_target
, unsignedp
, next_methods
);
1156 if (inter
!= 0 && inter
!= outof_target
)
1157 emit_move_insn (outof_target
, inter
);
1160 insns
= get_insns ();
1165 if (binoptab
->code
!= UNKNOWN
)
1166 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1170 /* We can't make this a no conflict block if this is a word swap,
1171 because the word swap case fails if the input and output values
1172 are in the same register. */
1173 if (shift_count
!= BITS_PER_WORD
)
1174 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1183 /* These can be done a word at a time by propagating carries. */
1184 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1185 && class == MODE_INT
1186 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1187 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1190 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1191 unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1192 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1195 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1196 value is one of those, use it. Otherwise, use 1 since it is the
1197 one easiest to get. */
1198 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1199 int normalizep
= STORE_FLAG_VALUE
;
1204 /* Prepare the operands. */
1205 xop0
= force_reg (mode
, op0
);
1206 xop1
= force_reg (mode
, op1
);
1208 if (target
== 0 || GET_CODE (target
) != REG
1209 || target
== xop0
|| target
== xop1
)
1210 target
= gen_reg_rtx (mode
);
1212 /* Indicate for flow that the entire target reg is being set. */
1213 if (GET_CODE (target
) == REG
)
1214 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
1216 /* Do the actual arithmetic. */
1217 for (i
= 0; i
< nwords
; i
++)
1219 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1220 rtx target_piece
= operand_subword (target
, index
, 1, mode
);
1221 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1222 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1225 /* Main add/subtract of the input operands. */
1226 x
= expand_binop (word_mode
, binoptab
,
1227 op0_piece
, op1_piece
,
1228 target_piece
, unsignedp
, next_methods
);
1234 /* Store carry from main add/subtract. */
1235 carry_out
= gen_reg_rtx (word_mode
);
1236 carry_out
= emit_store_flag_force (carry_out
,
1237 (binoptab
== add_optab
1240 word_mode
, 1, normalizep
);
1247 /* Add/subtract previous carry to main result. */
1248 newx
= expand_binop (word_mode
,
1249 normalizep
== 1 ? binoptab
: otheroptab
,
1251 NULL_RTX
, 1, next_methods
);
1255 /* Get out carry from adding/subtracting carry in. */
1256 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1257 carry_tmp
= emit_store_flag_force (carry_tmp
,
1258 (binoptab
== add_optab
1261 word_mode
, 1, normalizep
);
1263 /* Logical-ior the two poss. carry together. */
1264 carry_out
= expand_binop (word_mode
, ior_optab
,
1265 carry_out
, carry_tmp
,
1266 carry_out
, 0, next_methods
);
1270 emit_move_insn (target_piece
, newx
);
1273 carry_in
= carry_out
;
1276 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1278 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1280 rtx temp
= emit_move_insn (target
, target
);
1282 set_unique_reg_note (temp
,
1284 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1293 delete_insns_since (last
);
1296 /* If we want to multiply two two-word values and have normal and widening
1297 multiplies of single-word values, we can do this with three smaller
1298 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1299 because we are not operating on one word at a time.
1301 The multiplication proceeds as follows:
1302 _______________________
1303 [__op0_high_|__op0_low__]
1304 _______________________
1305 * [__op1_high_|__op1_low__]
1306 _______________________________________________
1307 _______________________
1308 (1) [__op0_low__*__op1_low__]
1309 _______________________
1310 (2a) [__op0_low__*__op1_high_]
1311 _______________________
1312 (2b) [__op0_high_*__op1_low__]
1313 _______________________
1314 (3) [__op0_high_*__op1_high_]
1317 This gives a 4-word result. Since we are only interested in the
1318 lower 2 words, partial result (3) and the upper words of (2a) and
1319 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1320 calculated using non-widening multiplication.
1322 (1), however, needs to be calculated with an unsigned widening
1323 multiplication. If this operation is not directly supported we
1324 try using a signed widening multiplication and adjust the result.
1325 This adjustment works as follows:
1327 If both operands are positive then no adjustment is needed.
1329 If the operands have different signs, for example op0_low < 0 and
1330 op1_low >= 0, the instruction treats the most significant bit of
1331 op0_low as a sign bit instead of a bit with significance
1332 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1333 with 2**BITS_PER_WORD - op0_low, and two's complements the
1334 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1337 Similarly, if both operands are negative, we need to add
1338 (op0_low + op1_low) * 2**BITS_PER_WORD.
1340 We use a trick to adjust quickly. We logically shift op0_low right
1341 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1342 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1343 logical shift exists, we do an arithmetic right shift and subtract
1346 if (binoptab
== smul_optab
1347 && class == MODE_INT
1348 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1349 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1350 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1351 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1352 != CODE_FOR_nothing
)
1353 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1354 != CODE_FOR_nothing
)))
1356 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1357 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1358 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1359 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1360 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1361 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1363 rtx op0_xhigh
= NULL_RTX
;
1364 rtx op1_xhigh
= NULL_RTX
;
1366 /* If the target is the same as one of the inputs, don't use it. This
1367 prevents problems with the REG_EQUAL note. */
1368 if (target
== op0
|| target
== op1
1369 || (target
!= 0 && GET_CODE (target
) != REG
))
1372 /* Multiply the two lower words to get a double-word product.
1373 If unsigned widening multiplication is available, use that;
1374 otherwise use the signed form and compensate. */
1376 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1378 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1379 target
, 1, OPTAB_DIRECT
);
1381 /* If we didn't succeed, delete everything we did so far. */
1383 delete_insns_since (last
);
1385 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1389 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1390 != CODE_FOR_nothing
)
1392 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1393 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1394 target
, 1, OPTAB_DIRECT
);
1395 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1396 NULL_RTX
, 1, next_methods
);
1398 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1399 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1402 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1403 NULL_RTX
, 0, next_methods
);
1405 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1406 op0_xhigh
, op0_xhigh
, 0,
1410 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1411 NULL_RTX
, 1, next_methods
);
1413 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1414 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1417 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1418 NULL_RTX
, 0, next_methods
);
1420 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1421 op1_xhigh
, op1_xhigh
, 0,
1426 /* If we have been able to directly compute the product of the
1427 low-order words of the operands and perform any required adjustments
1428 of the operands, we proceed by trying two more multiplications
1429 and then computing the appropriate sum.
1431 We have checked above that the required addition is provided.
1432 Full-word addition will normally always succeed, especially if
1433 it is provided at all, so we don't worry about its failure. The
1434 multiplication may well fail, however, so we do handle that. */
1436 if (product
&& op0_xhigh
&& op1_xhigh
)
1438 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1439 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1440 NULL_RTX
, 0, OPTAB_DIRECT
);
1443 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1444 product_high
, 0, next_methods
);
1446 if (temp
!= 0 && temp
!= product_high
)
1447 emit_move_insn (product_high
, temp
);
1450 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1451 NULL_RTX
, 0, OPTAB_DIRECT
);
1454 temp
= expand_binop (word_mode
, add_optab
, temp
,
1455 product_high
, product_high
,
1458 if (temp
!= 0 && temp
!= product_high
)
1459 emit_move_insn (product_high
, temp
);
1463 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1465 temp
= emit_move_insn (product
, product
);
1466 set_unique_reg_note (temp
,
1468 gen_rtx_fmt_ee (MULT
, mode
,
1477 /* If we get here, we couldn't do it for some reason even though we
1478 originally thought we could. Delete anything we've emitted in
1481 delete_insns_since (last
);
1484 /* We need to open-code the complex type operations: '+, -, * and /' */
1486 /* At this point we allow operations between two similar complex
1487 numbers, and also if one of the operands is not a complex number
1488 but rather of MODE_FLOAT or MODE_INT. However, the caller
1489 must make sure that the MODE of the non-complex operand matches
1490 the SUBMODE of the complex operand. */
1492 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1494 rtx real0
= 0, imag0
= 0;
1495 rtx real1
= 0, imag1
= 0;
1496 rtx realr
, imagr
, res
;
1501 /* Find the correct mode for the real and imaginary parts */
1502 enum machine_mode submode
1503 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1504 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1507 if (submode
== BLKmode
)
1511 target
= gen_reg_rtx (mode
);
1515 realr
= gen_realpart (submode
, target
);
1516 imagr
= gen_imagpart (submode
, target
);
1518 if (GET_MODE (op0
) == mode
)
1520 real0
= gen_realpart (submode
, op0
);
1521 imag0
= gen_imagpart (submode
, op0
);
1526 if (GET_MODE (op1
) == mode
)
1528 real1
= gen_realpart (submode
, op1
);
1529 imag1
= gen_imagpart (submode
, op1
);
1534 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0|| imag1
!= 0))
1537 switch (binoptab
->code
)
1540 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1542 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1543 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1544 realr
, unsignedp
, methods
);
1548 else if (res
!= realr
)
1549 emit_move_insn (realr
, res
);
1552 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1553 imagr
, unsignedp
, methods
);
1556 else if (binoptab
->code
== MINUS
)
1557 res
= expand_unop (submode
,
1558 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1559 imag1
, imagr
, unsignedp
);
1565 else if (res
!= imagr
)
1566 emit_move_insn (imagr
, res
);
1572 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1578 /* Don't fetch these from memory more than once. */
1579 real0
= force_reg (submode
, real0
);
1580 real1
= force_reg (submode
, real1
);
1581 imag0
= force_reg (submode
, imag0
);
1582 imag1
= force_reg (submode
, imag1
);
1584 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1585 unsignedp
, methods
);
1587 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1588 unsignedp
, methods
);
1590 if (temp1
== 0 || temp2
== 0)
1595 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1596 temp1
, temp2
, realr
, unsignedp
, methods
));
1600 else if (res
!= realr
)
1601 emit_move_insn (realr
, res
);
1603 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1604 NULL_RTX
, unsignedp
, methods
);
1606 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1607 NULL_RTX
, unsignedp
, methods
);
1609 if (temp1
== 0 || temp2
== 0)
1614 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1615 temp1
, temp2
, imagr
, unsignedp
, methods
));
1619 else if (res
!= imagr
)
1620 emit_move_insn (imagr
, res
);
1626 /* Don't fetch these from memory more than once. */
1627 real0
= force_reg (submode
, real0
);
1628 real1
= force_reg (submode
, real1
);
1630 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1631 realr
, unsignedp
, methods
);
1634 else if (res
!= realr
)
1635 emit_move_insn (realr
, res
);
1638 res
= expand_binop (submode
, binoptab
,
1639 real1
, imag0
, imagr
, unsignedp
, methods
);
1641 res
= expand_binop (submode
, binoptab
,
1642 real0
, imag1
, imagr
, unsignedp
, methods
);
1646 else if (res
!= imagr
)
1647 emit_move_insn (imagr
, res
);
1654 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1658 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1660 /* Don't fetch these from memory more than once. */
1661 real1
= force_reg (submode
, real1
);
1663 /* Simply divide the real and imaginary parts by `c' */
1664 if (class == MODE_COMPLEX_FLOAT
)
1665 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1666 realr
, unsignedp
, methods
);
1668 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1669 real0
, real1
, realr
, unsignedp
);
1673 else if (res
!= realr
)
1674 emit_move_insn (realr
, res
);
1676 if (class == MODE_COMPLEX_FLOAT
)
1677 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1678 imagr
, unsignedp
, methods
);
1680 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1681 imag0
, real1
, imagr
, unsignedp
);
1685 else if (res
!= imagr
)
1686 emit_move_insn (imagr
, res
);
1692 switch (flag_complex_divide_method
)
1695 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1696 realr
, imagr
, submode
,
1702 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1703 realr
, imagr
, submode
,
1723 if (binoptab
->code
!= UNKNOWN
)
1725 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1726 copy_rtx (op0
), copy_rtx (op1
));
1730 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1736 /* It can't be open-coded in this mode.
1737 Use a library call if one is available and caller says that's ok. */
1739 if (binoptab
->handlers
[(int) mode
].libfunc
1740 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1744 enum machine_mode op1_mode
= mode
;
1751 op1_mode
= word_mode
;
1752 /* Specify unsigned here,
1753 since negative shift counts are meaningless. */
1754 op1x
= convert_to_mode (word_mode
, op1
, 1);
1757 if (GET_MODE (op0
) != VOIDmode
1758 && GET_MODE (op0
) != mode
)
1759 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1761 /* Pass 1 for NO_QUEUE so we don't lose any increments
1762 if the libcall is cse'd or moved. */
1763 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1764 NULL_RTX
, LCT_CONST
, mode
, 2,
1765 op0
, mode
, op1x
, op1_mode
);
1767 insns
= get_insns ();
1770 target
= gen_reg_rtx (mode
);
1771 emit_libcall_block (insns
, target
, value
,
1772 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1777 delete_insns_since (last
);
1779 /* It can't be done in this mode. Can we do it in a wider mode? */
1781 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1782 || methods
== OPTAB_MUST_WIDEN
))
1784 /* Caller says, don't even try. */
1785 delete_insns_since (entry_last
);
1789 /* Compute the value of METHODS to pass to recursive calls.
1790 Don't allow widening to be tried recursively. */
1792 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1794 /* Look for a wider mode of the same class for which it appears we can do
1797 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1799 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1800 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1802 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1803 != CODE_FOR_nothing
)
1804 || (methods
== OPTAB_LIB
1805 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1807 rtx xop0
= op0
, xop1
= op1
;
1810 /* For certain integer operations, we need not actually extend
1811 the narrow operands, as long as we will truncate
1812 the results to the same narrowness. */
1814 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1815 || binoptab
== xor_optab
1816 || binoptab
== add_optab
|| binoptab
== sub_optab
1817 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1818 && class == MODE_INT
)
1821 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1822 unsignedp
, no_extend
);
1824 /* The second operand of a shift must always be extended. */
1825 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1826 no_extend
&& binoptab
!= ashl_optab
);
1828 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1829 unsignedp
, methods
);
1832 if (class != MODE_INT
)
1835 target
= gen_reg_rtx (mode
);
1836 convert_move (target
, temp
, 0);
1840 return gen_lowpart (mode
, temp
);
1843 delete_insns_since (last
);
1848 delete_insns_since (entry_last
);
1852 /* Expand a binary operator which has both signed and unsigned forms.
1853 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1856 If we widen unsigned operands, we may use a signed wider operation instead
1857 of an unsigned wider operation, since the result would be the same. */
/* Expand a binary operation that has both signed (SOPTAB) and unsigned
   (UOPTAB) forms.  Strategies are tried in order of cost: direct insn,
   widening through a signed wider insn (via a fake optab that hides the
   same-mode signed insn), widening through the unsigned optab, a
   same-width library call, and finally widened library calls.
   NOTE(review): this text is a lossy extraction of GCC's optabs.c -- the
   embedded original line numbers jump (1874 -> 1877, 1900 -> 1904, ...),
   so the return type, the declaration of TEMP, braces and several return
   statements are missing.  Comments below annotate the visible logic
   only; confirm against a pristine copy before editing.  */
1860 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
1861 enum machine_mode mode
;
1862 optab uoptab
, soptab
;
1863 rtx op0
, op1
, target
;
1865 enum optab_methods methods
;
/* Pick the optab matching the operation's signedness for direct use.  */
1868 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1869 struct optab wide_soptab
;
1871 /* Do it without widening, if possible. */
1872 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1873 unsignedp
, OPTAB_DIRECT
);
/* Success, or the caller forbade widening: stop here.  NOTE(review): the
   return that presumably follows was dropped by the extraction.  */
1874 if (temp
|| methods
== OPTAB_DIRECT
)
1877 /* Try widening to a signed int. Make a fake signed optab that
1878 hides any signed insn for direct use. */
1879 wide_soptab
= *soptab
;
1880 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1881 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1883 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1884 unsignedp
, OPTAB_WIDEN
);
1886 /* For unsigned operands, try widening to an unsigned int. */
1887 if (temp
== 0 && unsignedp
)
1888 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1889 unsignedp
, OPTAB_WIDEN
);
1890 if (temp
|| methods
== OPTAB_WIDEN
)
1893 /* Use the right width lib call if that exists. */
1894 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1895 if (temp
|| methods
== OPTAB_LIB
)
1898 /* Must widen and use a lib call, use either signed or unsigned. */
1899 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1900 unsignedp
, methods
);
/* Final fallback: widened library call through the unsigned optab.  */
1904 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1905 unsignedp
, methods
);
1909 /* Generate code to perform an operation specified by BINOPTAB
1910 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1911 We assume that the order of the operands for the instruction
1912 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1913 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1915 Either TARG0 or TARG1 may be zero, but what that means is that
1916 the result is not actually wanted. We will generate it into
1917 a dummy pseudo-reg and discard it. They may not both be zero.
1919 Returns 1 if this operation can be performed; 0 if not. */
/* Expand BINOPTAB, an operation producing two results, into TARG0 and
   TARG1; see the comment block preceding this function for the insn
   pattern shape and the meaning of a zero TARG0/TARG1.
   NOTE(review): lossy extraction of GCC's optabs.c -- the embedded
   original line numbers jump throughout, so the return type, the
   declarations of PAT and LAST, braces, the emit_insn after GEN_FCN and
   the success returns are missing.  Annotations cover visible code only.  */
1922 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
/* The result mode comes from whichever target the caller supplied.  */
1928 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1929 enum mode_class
class;
1930 enum machine_mode wider_mode
;
1931 rtx entry_last
= get_last_insn ();
1934 class = GET_MODE_CLASS (mode
);
1936 op0
= protect_from_queue (op0
, 0);
1937 op1
= protect_from_queue (op1
, 0);
1941 op0
= force_not_mem (op0
);
1942 op1
= force_not_mem (op1
);
1945 /* If we are inside an appropriately-short loop and one operand is an
1946 expensive constant, force it into a register. */
1947 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
1948 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1949 op0
= force_reg (mode
, op0
);
1951 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
1952 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1953 op1
= force_reg (mode
, op1
);
/* A zero target means the value is unwanted: generate into a dummy reg.  */
1956 targ0
= protect_from_queue (targ0
, 1);
1958 targ0
= gen_reg_rtx (mode
);
1960 targ1
= protect_from_queue (targ1
, 1);
1962 targ1
= gen_reg_rtx (mode
);
1964 /* Record where to go back to if we fail. */
1965 last
= get_last_insn ();
/* Strategy 1: a direct two-result insn exists for this mode.  */
1967 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1969 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1970 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1971 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1973 rtx xop0
= op0
, xop1
= op1
;
1975 /* In case this insn wants input operands in modes different from the
1976 result, convert the operands. */
1977 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
1978 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1980 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
1981 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
1983 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1984 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
1985 xop0
= copy_to_mode_reg (mode0
, xop0
);
1987 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
1988 xop1
= copy_to_mode_reg (mode1
, xop1
);
1990 /* We could handle this, but we should always be called with a pseudo
1991 for our targets and all insns should take them as outputs. */
1992 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1993 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
/* Generate the insn; operand order is TARG0, OP0, OP1, TARG1, matching
   the pattern described before this function.  */
1996 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2003 delete_insns_since (last
);
2006 /* It can't be done in this mode. Can we do it in a wider mode? */
2008 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2010 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2011 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2013 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2014 != CODE_FOR_nothing
)
/* Recurse in the wider mode, then narrow both results back down.  */
2016 rtx t0
= gen_reg_rtx (wider_mode
);
2017 rtx t1
= gen_reg_rtx (wider_mode
);
2018 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2019 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2021 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2024 convert_move (targ0
, t0
, unsignedp
);
2025 convert_move (targ1
, t1
, unsignedp
);
2029 delete_insns_since (last
);
/* Complete failure: remove everything emitted since entry.  */
2034 delete_insns_since (entry_last
);
2038 /* Wrapper around expand_unop which takes an rtx code to specify
2039 the operation to perform, not an optab pointer. All other
2040 arguments are the same. */
/* Wrapper around expand_unop: map the rtx CODE to its optab through the
   code_to_optab table and delegate; see the comment block preceding
   this function.
   NOTE(review): lossy extraction -- original lines 2044-2048 and
   2050-2052 are absent here (the remaining parameter declarations and,
   presumably, a guard for a null optab), along with the return type.  */
2042 expand_simple_unop (mode
, code
, op0
, target
, unsignedp
)
2043 enum machine_mode mode
;
2049 optab unop
= code_to_optab
[(int) code
];
2053 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2056 /* Generate code to perform an operation specified by UNOPTAB
2057 on operand OP0, with result having machine-mode MODE.
2059 UNSIGNEDP is for the case where we have to widen the operands
2060 to perform the operation. It says to use zero-extension.
2062 If TARGET is nonzero, the value
2063 is generated there, if it is convenient to do so.
2064 In all cases an rtx is returned for the locus of the value;
2065 this may or may not be TARGET. */
/* Generate code for the unary operation UNOPTAB on OP0 with result mode
   MODE; see the comment block preceding this function.  Visible
   strategies, in order: direct insn; open-coding in a wider mode;
   word-at-a-time one's complement; open-coded complex negation; a
   library call in this mode; a wider mode (insn or libfunc); and for
   NEG a final subtract-from-zero fallback.
   NOTE(review): lossy extraction of GCC's optabs.c -- the embedded
   original line numbers jump throughout, so braces, declarations (TEMP,
   XOP0, PAT, I, INSNS, SEQ), emit_insn calls and several returns are
   missing.  Annotations below describe only the visible code.  */
2068 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2069 enum machine_mode mode
;
2075 enum mode_class
class;
2076 enum machine_mode wider_mode
;
2078 rtx last
= get_last_insn ();
2081 class = GET_MODE_CLASS (mode
);
2083 op0
= protect_from_queue (op0
, 0);
2087 op0
= force_not_mem (op0
);
2091 target
= protect_from_queue (target
, 1);
/* Strategy 1: a direct insn exists for this mode.  */
2093 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2095 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2096 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2102 temp
= gen_reg_rtx (mode
);
2104 if (GET_MODE (xop0
) != VOIDmode
2105 && GET_MODE (xop0
) != mode0
)
2106 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2108 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2110 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2111 xop0
= copy_to_mode_reg (mode0
, xop0
);
2113 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2114 temp
= gen_reg_rtx (mode
);
2116 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If the expansion is a SEQUENCE but we cannot attach an equivalence
   note to it, discard it and retry without a suggested target.  */
2119 if (GET_CODE (pat
) == SEQUENCE
2120 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2122 delete_insns_since (last
);
2123 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2131 delete_insns_since (last
);
2134 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2136 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2137 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2138 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2140 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2144 /* For certain operations, we need not actually extend
2145 the narrow operand, as long as we will truncate the
2146 results to the same narrowness. */
2148 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2149 (unoptab
== neg_optab
2150 || unoptab
== one_cmpl_optab
)
2151 && class == MODE_INT
);
2153 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
/* Non-integer results must be narrowed with a real conversion;
   integers can just take the low part.  */
2158 if (class != MODE_INT
)
2161 target
= gen_reg_rtx (mode
);
2162 convert_move (target
, temp
, 0);
2166 return gen_lowpart (mode
, temp
);
2169 delete_insns_since (last
);
2173 /* These can be done a word at a time. */
2174 if (unoptab
== one_cmpl_optab
2175 && class == MODE_INT
2176 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2177 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2182 if (target
== 0 || target
== op0
)
2183 target
= gen_reg_rtx (mode
);
2187 /* Do the actual arithmetic. */
2188 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2190 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2191 rtx x
= expand_unop (word_mode
, unoptab
,
2192 operand_subword_force (op0
, i
, mode
),
2193 target_piece
, unsignedp
);
2194 if (target_piece
!= x
)
2195 emit_move_insn (target_piece
, x
);
2198 insns
= get_insns ();
2201 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2202 gen_rtx_fmt_e (unoptab
->code
, mode
,
2207 /* Open-code the complex negation operation. */
2208 else if (unoptab
->code
== NEG
2209 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2215 /* Find the correct mode for the real and imaginary parts */
2216 enum machine_mode submode
2217 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2218 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2221 if (submode
== BLKmode
)
2225 target
= gen_reg_rtx (mode
);
/* Negate the imaginary part, then the real part, each into its
   half of TARGET.  */
2229 target_piece
= gen_imagpart (submode
, target
);
2230 x
= expand_unop (submode
, unoptab
,
2231 gen_imagpart (submode
, op0
),
2232 target_piece
, unsignedp
);
2233 if (target_piece
!= x
)
2234 emit_move_insn (target_piece
, x
);
2236 target_piece
= gen_realpart (submode
, target
);
2237 x
= expand_unop (submode
, unoptab
,
2238 gen_realpart (submode
, op0
),
2239 target_piece
, unsignedp
);
2240 if (target_piece
!= x
)
2241 emit_move_insn (target_piece
, x
);
2246 emit_no_conflict_block (seq
, target
, op0
, 0,
2247 gen_rtx_fmt_e (unoptab
->code
, mode
,
2252 /* Now try a library call in this mode. */
2253 if (unoptab
->handlers
[(int) mode
].libfunc
)
2260 /* Pass 1 for NO_QUEUE so we don't lose any increments
2261 if the libcall is cse'd or moved. */
2262 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2263 NULL_RTX
, LCT_CONST
, mode
, 1, op0
, mode
);
2264 insns
= get_insns ();
2267 target
= gen_reg_rtx (mode
);
2268 emit_libcall_block (insns
, target
, value
,
2269 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2274 /* It can't be done in this mode. Can we do it in a wider mode? */
2276 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
/* Unlike the earlier widening loop, this pass also accepts a wider
   mode that only has a library function.  */
2278 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2279 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2281 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2282 != CODE_FOR_nothing
)
2283 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2287 /* For certain operations, we need not actually extend
2288 the narrow operand, as long as we will truncate the
2289 results to the same narrowness. */
2291 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2292 (unoptab
== neg_optab
2293 || unoptab
== one_cmpl_optab
)
2294 && class == MODE_INT
);
2296 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2301 if (class != MODE_INT
)
2304 target
= gen_reg_rtx (mode
);
2305 convert_move (target
, temp
, 0);
2309 return gen_lowpart (mode
, temp
);
2312 delete_insns_since (last
);
2317 /* If there is no negate operation, try doing a subtract from zero.
2318 The US Software GOFAST library needs this. */
2319 if (unoptab
->code
== NEG
)
2322 temp
= expand_binop (mode
,
2323 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2324 CONST0_RTX (mode
), op0
,
2325 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2333 /* Emit code to compute the absolute value of OP0, with result to
2334 TARGET if convenient. (TARGET may be 0.) The return value says
2335 where the result actually is to be found.
2337 MODE is the mode of the operand; the mode of the result is
2338 different but can be deduced from MODE.
/* Compute the absolute value of OP0 with result to TARGET; see the
   comment block preceding this function.  Visible strategies: a direct
   abs/absv insn; MAX (x, -x) when a signed-max insn exists; the
   branchless (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)) form
   when branches are expensive; and finally compare-and-negate around a
   label.
   NOTE(review): lossy extraction -- the return type, the declarations
   of TEMP and OP1, braces and several returns are missing; the embedded
   original line numbers mark the gaps.  */
2343 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2344 enum machine_mode mode
;
2347 int result_unsignedp
;
2353 result_unsignedp
= 1;
2355 /* First try to do it with a special abs instruction. */
2356 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2361 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2362 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2364 rtx last
= get_last_insn ();
2366 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2368 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2374 delete_insns_since (last
);
2377 /* If this machine has expensive jumps, we can do integer absolute
2378 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2379 where W is the width of MODE. */
2381 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
/* EXTENDED is the sign mask: all zeros for non-negative X, all ones
   for negative X (arithmetic right shift by W-1).  */
2383 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2384 size_int (GET_MODE_BITSIZE (mode
) - 1),
2387 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2390 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2391 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2397 /* If that does not win, use conditional jump and negate. */
2399 /* It is safe to use the target if it is the same
2400 as the source if this is also a pseudo register */
2401 if (op0
== target
&& GET_CODE (op0
) == REG
2402 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
/* From here on OP1 holds the branch-around label, not an operand.  */
2405 op1
= gen_label_rtx ();
2406 if (target
== 0 || ! safe
2407 || GET_MODE (target
) != mode
2408 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2409 || (GET_CODE (target
) == REG
2410 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2411 target
= gen_reg_rtx (mode
);
2413 emit_move_insn (target
, op0
);
2416 /* If this mode is an integer too wide to compare properly,
2417 compare word by word. Rely on CSE to optimize constant cases. */
2418 if (GET_MODE_CLASS (mode
) == MODE_INT
2419 && ! can_compare_p (GE
, mode
, ccp_jump
))
2420 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2423 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2424 NULL_RTX
, NULL_RTX
, op1
);
/* Negative path: negate into TARGET, then fall through to the label.  */
2426 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2429 emit_move_insn (target
, op0
);
2435 /* Emit code to compute the absolute value of OP0, with result to
2436 TARGET if convenient. (TARGET may be 0.) The return value says
2437 where the result actually is to be found.
2439 MODE is the mode of the operand; the mode of the result is
2440 different but can be deduced from MODE.
2442 UNSIGNEDP is relevant for complex integer modes. */
2445 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2446 enum machine_mode mode
;
2451 enum mode_class
class = GET_MODE_CLASS (mode
);
2452 enum machine_mode wider_mode
;
2454 rtx entry_last
= get_last_insn ();
2457 optab this_abs_optab
;
2459 /* Find the correct mode for the real and imaginary parts. */
2460 enum machine_mode submode
2461 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2462 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2465 if (submode
== BLKmode
)
2468 op0
= protect_from_queue (op0
, 0);
2472 op0
= force_not_mem (op0
);
2475 last
= get_last_insn ();
2478 target
= protect_from_queue (target
, 1);
2480 this_abs_optab
= ! unsignedp
&& flag_trapv
2481 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2482 ? absv_optab
: abs_optab
;
2484 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2486 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2487 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2493 temp
= gen_reg_rtx (submode
);
2495 if (GET_MODE (xop0
) != VOIDmode
2496 && GET_MODE (xop0
) != mode0
)
2497 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2499 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2501 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2502 xop0
= copy_to_mode_reg (mode0
, xop0
);
2504 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2505 temp
= gen_reg_rtx (submode
);
2507 pat
= GEN_FCN (icode
) (temp
, xop0
);
2510 if (GET_CODE (pat
) == SEQUENCE
2511 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2514 delete_insns_since (last
);
2515 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2524 delete_insns_since (last
);
2527 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2529 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2530 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2532 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2533 != CODE_FOR_nothing
)
2537 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2538 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2542 if (class != MODE_COMPLEX_INT
)
2545 target
= gen_reg_rtx (submode
);
2546 convert_move (target
, temp
, 0);
2550 return gen_lowpart (submode
, temp
);
2553 delete_insns_since (last
);
2557 /* Open-code the complex absolute-value operation
2558 if we can open-code sqrt. Otherwise it's not worth while. */
2559 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
2562 rtx real
, imag
, total
;
2564 real
= gen_realpart (submode
, op0
);
2565 imag
= gen_imagpart (submode
, op0
);
2567 /* Square both parts. */
2568 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2569 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2571 /* Sum the parts. */
2572 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2573 0, OPTAB_LIB_WIDEN
);
2575 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2576 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2578 delete_insns_since (last
);
2583 /* Now try a library call in this mode. */
2584 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
2591 /* Pass 1 for NO_QUEUE so we don't lose any increments
2592 if the libcall is cse'd or moved. */
2593 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2594 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
2595 insns
= get_insns ();
2598 target
= gen_reg_rtx (submode
);
2599 emit_libcall_block (insns
, target
, value
,
2600 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
2605 /* It can't be done in this mode. Can we do it in a wider mode? */
2607 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2608 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2610 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2611 != CODE_FOR_nothing
)
2612 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2616 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2618 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2622 if (class != MODE_COMPLEX_INT
)
2625 target
= gen_reg_rtx (submode
);
2626 convert_move (target
, temp
, 0);
2630 return gen_lowpart (submode
, temp
);
2633 delete_insns_since (last
);
2637 delete_insns_since (entry_last
);
2641 /* Generate an instruction whose insn-code is INSN_CODE,
2642 with two operands: an output TARGET and an input OP0.
2643 TARGET *must* be nonzero, and the output is always stored there.
2644 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2645 the value that is stored into TARGET. */
2648 emit_unop_insn (icode
, target
, op0
, code
)
2655 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2658 temp
= target
= protect_from_queue (target
, 1);
2660 op0
= protect_from_queue (op0
, 0);
2662 /* Sign and zero extension from memory is often done specially on
2663 RISC machines, so forcing into a register here can pessimize
2665 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2666 op0
= force_not_mem (op0
);
2668 /* Now, if insn does not accept our operands, put them into pseudos. */
2670 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2671 op0
= copy_to_mode_reg (mode0
, op0
);
2673 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2674 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2675 temp
= gen_reg_rtx (GET_MODE (temp
));
2677 pat
= GEN_FCN (icode
) (temp
, op0
);
2679 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2680 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2685 emit_move_insn (target
, temp
);
2688 /* Emit code to perform a series of operations on a multi-word quantity, one
2691 Such a block is preceded by a CLOBBER of the output, consists of multiple
2692 insns, each setting one word of the output, and followed by a SET copying
2693 the output to itself.
2695 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2696 note indicating that it doesn't conflict with the (also multi-word)
2697 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2700 INSNS is a block of code generated to perform the operation, not including
2701 the CLOBBER and final copy. All insns that compute intermediate values
2702 are first emitted, followed by the block as described above.
2704 TARGET, OP0, and OP1 are the output and inputs of the operations,
2705 respectively. OP1 may be zero for a unary operation.
2707 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2710 If TARGET is not a register, INSNS is simply emitted with no special
2711 processing. Likewise if anything in INSNS is not an INSN or if
2712 there is a libcall block inside INSNS.
2714 The final insn emitted is returned. */
2717 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2723 rtx prev
, next
, first
, last
, insn
;
2725 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2726 return emit_insns (insns
);
2728 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2729 if (GET_CODE (insn
) != INSN
2730 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2731 return emit_insns (insns
);
2733 /* First emit all insns that do not store into words of the output and remove
2734 these from the list. */
2735 for (insn
= insns
; insn
; insn
= next
)
2740 next
= NEXT_INSN (insn
);
2742 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2743 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2744 set
= PATTERN (insn
);
2745 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2747 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2748 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2750 set
= XVECEXP (PATTERN (insn
), 0, i
);
2758 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2760 if (PREV_INSN (insn
))
2761 NEXT_INSN (PREV_INSN (insn
)) = next
;
2766 PREV_INSN (next
) = PREV_INSN (insn
);
2772 prev
= get_last_insn ();
2774 /* Now write the CLOBBER of the output, followed by the setting of each
2775 of the words, followed by the final copy. */
2776 if (target
!= op0
&& target
!= op1
)
2777 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2779 for (insn
= insns
; insn
; insn
= next
)
2781 next
= NEXT_INSN (insn
);
2784 if (op1
&& GET_CODE (op1
) == REG
)
2785 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
2788 if (op0
&& GET_CODE (op0
) == REG
)
2789 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
2793 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2794 != CODE_FOR_nothing
)
2796 last
= emit_move_insn (target
, target
);
2798 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
2802 last
= get_last_insn ();
2804 /* Remove any existing REG_EQUAL note from "last", or else it will
2805 be mistaken for a note referring to the full contents of the
2806 alleged libcall value when found together with the REG_RETVAL
2807 note added below. An existing note can come from an insn
2808 expansion at "last". */
2809 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2813 first
= get_insns ();
2815 first
= NEXT_INSN (prev
);
2817 /* Encapsulate the block so it gets manipulated as a unit. */
2818 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2820 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
2825 /* Emit code to make a call to a constant function or a library call.
2827 INSNS is a list containing all insns emitted in the call.
2828 These insns leave the result in RESULT. Our block is to copy RESULT
2829 to TARGET, which is logically equivalent to EQUIV.
2831 We first emit any insns that set a pseudo on the assumption that these are
2832 loading constants into registers; doing so allows them to be safely cse'ed
2833 between blocks. Then we emit all the other insns in the block, followed by
2834 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2835 note with an operand of EQUIV.
2837 Moving assignments to pseudos outside of the block is done to improve
2838 the generated code, but is not required to generate correct code,
2839 hence being unable to move an assignment is not grounds for not making
2840 a libcall block. There are two reasons why it is safe to leave these
2841 insns inside the block: First, we know that these pseudos cannot be
2842 used in generated RTL outside the block since they are created for
2843 temporary purposes within the block. Second, CSE will not record the
2844 values of anything set inside a libcall block, so we know they must
2845 be dead at the end of the block.
2847 Except for the first group of insns (the ones setting pseudos), the
2848 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2851 emit_libcall_block (insns
, target
, result
, equiv
)
2857 rtx final_dest
= target
;
2858 rtx prev
, next
, first
, last
, insn
;
2860 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
2861 into a MEM later. Protect the libcall block from this change. */
2862 if (! REG_P (target
) || REG_USERVAR_P (target
))
2863 target
= gen_reg_rtx (GET_MODE (target
));
2865 /* If we're using non-call exceptions, a libcall corresponding to an
2866 operation that may trap may also trap. */
2867 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
2869 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2870 if (GET_CODE (insn
) == CALL_INSN
)
2872 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2874 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
2875 remove_note (insn
, note
);
2879 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
2880 reg note to indicate that this call cannot throw or execute a nonlocal
2881 goto (unless there is already a REG_EH_REGION note, in which case
2883 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2884 if (GET_CODE (insn
) == CALL_INSN
)
2886 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2889 XEXP (note
, 0) = GEN_INT (-1);
2891 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
2895 /* First emit all insns that set pseudos. Remove them from the list as
2896 we go. Avoid insns that set pseudos which were referenced in previous
2897 insns. These can be generated by move_by_pieces, for example,
2898 to update an address. Similarly, avoid insns that reference things
2899 set in previous insns. */
2901 for (insn
= insns
; insn
; insn
= next
)
2903 rtx set
= single_set (insn
);
2905 next
= NEXT_INSN (insn
);
2907 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2908 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2910 || ((! INSN_P(insns
)
2911 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
2912 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2913 && ! modified_in_p (SET_SRC (set
), insns
)
2914 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2916 if (PREV_INSN (insn
))
2917 NEXT_INSN (PREV_INSN (insn
)) = next
;
2922 PREV_INSN (next
) = PREV_INSN (insn
);
2928 prev
= get_last_insn ();
2930 /* Write the remaining insns followed by the final copy. */
2932 for (insn
= insns
; insn
; insn
= next
)
2934 next
= NEXT_INSN (insn
);
2939 last
= emit_move_insn (target
, result
);
2940 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2941 != CODE_FOR_nothing
)
2942 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
2945 /* Remove any existing REG_EQUAL note from "last", or else it will
2946 be mistaken for a note referring to the full contents of the
2947 libcall value when found together with the REG_RETVAL note added
2948 below. An existing note can come from an insn expansion at
2950 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2953 if (final_dest
!= target
)
2954 emit_move_insn (final_dest
, target
);
2957 first
= get_insns ();
2959 first
= NEXT_INSN (prev
);
2961 /* Encapsulate the block so it gets manipulated as a unit. */
2962 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
2964 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2966 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
2971 /* Generate code to store zero in X. */
2977 emit_move_insn (x
, const0_rtx
);
2980 /* Generate code to store 1 in X
2981 assuming it contains zero beforehand. */
2984 emit_0_to_1_insn (x
)
2987 emit_move_insn (x
, const1_rtx
);
2990 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
2991 PURPOSE describes how this comparison will be used. CODE is the rtx
2992 comparison code we will be using.
2994 ??? Actually, CODE is slightly weaker than that. A target is still
2995 required to implement all of the normal bcc operations, but not
2996 required to implement all (or any) of the unordered bcc operations. */
2999 can_compare_p (code
, mode
, purpose
)
3001 enum machine_mode mode
;
3002 enum can_compare_purpose purpose
;
3006 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3008 if (purpose
== ccp_jump
)
3009 return bcc_gen_fctn
[(int)code
] != NULL
;
3010 else if (purpose
== ccp_store_flag
)
3011 return setcc_gen_code
[(int)code
] != CODE_FOR_nothing
;
3013 /* There's only one cmov entry point, and it's allowed to fail. */
3016 if (purpose
== ccp_jump
3017 && cbranch_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3019 if (purpose
== ccp_cmov
3020 && cmov_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3022 if (purpose
== ccp_store_flag
3023 && cstore_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
3026 mode
= GET_MODE_WIDER_MODE (mode
);
3028 while (mode
!= VOIDmode
);
3033 /* This function is called when we are going to emit a compare instruction that
3034 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3036 *PMODE is the mode of the inputs (in case they are const_int).
3037 *PUNSIGNEDP nonzero says that the operands are unsigned;
3038 this matters if they need to be widened.
3040 If they have mode BLKmode, then SIZE specifies the size of both operands.
3042 This function performs all the setup necessary so that the caller only has
3043 to emit a single comparison insn. This setup can involve doing a BLKmode
3044 comparison or emitting a library call to perform the comparison if no insn
3045 is available to handle it.
3046 The values which are passed in through pointers can be modified; the caller
3047 should perform the comparison on the modified values. */
3050 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3052 enum rtx_code
*pcomparison
;
3054 enum machine_mode
*pmode
;
3056 enum can_compare_purpose purpose
;
3058 enum machine_mode mode
= *pmode
;
3059 rtx x
= *px
, y
= *py
;
3060 int unsignedp
= *punsignedp
;
3061 enum mode_class
class;
3063 class = GET_MODE_CLASS (mode
);
3065 /* They could both be VOIDmode if both args are immediate constants,
3066 but we should fold that at an earlier stage.
3067 With no special code here, this will call abort,
3068 reminding the programmer to implement such folding. */
3070 if (mode
!= BLKmode
&& flag_force_mem
)
3072 x
= force_not_mem (x
);
3073 y
= force_not_mem (y
);
3076 /* If we are inside an appropriately-short loop and one operand is an
3077 expensive constant, force it into a register. */
3078 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3079 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3080 x
= force_reg (mode
, x
);
3082 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3083 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3084 y
= force_reg (mode
, y
);
3087 /* Abort if we have a non-canonical comparison. The RTL documentation
3088 states that canonical comparisons are required only for targets which
3090 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3094 /* Don't let both operands fail to indicate the mode. */
3095 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3096 x
= force_reg (mode
, x
);
3098 /* Handle all BLKmode compares. */
3100 if (mode
== BLKmode
)
3103 enum machine_mode result_mode
;
3104 rtx opalign ATTRIBUTE_UNUSED
3105 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3108 x
= protect_from_queue (x
, 0);
3109 y
= protect_from_queue (y
, 0);
3113 #ifdef HAVE_cmpstrqi
3115 && GET_CODE (size
) == CONST_INT
3116 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3118 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3119 result
= gen_reg_rtx (result_mode
);
3120 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3124 #ifdef HAVE_cmpstrhi
3126 && GET_CODE (size
) == CONST_INT
3127 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3129 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3130 result
= gen_reg_rtx (result_mode
);
3131 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3135 #ifdef HAVE_cmpstrsi
3138 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3139 result
= gen_reg_rtx (result_mode
);
3140 size
= protect_from_queue (size
, 0);
3141 emit_insn (gen_cmpstrsi (result
, x
, y
,
3142 convert_to_mode (SImode
, size
, 1),
3148 #ifdef TARGET_MEM_FUNCTIONS
3149 emit_library_call (memcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3150 TYPE_MODE (integer_type_node
), 3,
3151 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3152 convert_to_mode (TYPE_MODE (sizetype
), size
,
3153 TREE_UNSIGNED (sizetype
)),
3154 TYPE_MODE (sizetype
));
3156 emit_library_call (bcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3157 TYPE_MODE (integer_type_node
), 3,
3158 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3159 convert_to_mode (TYPE_MODE (integer_type_node
),
3161 TREE_UNSIGNED (integer_type_node
)),
3162 TYPE_MODE (integer_type_node
));
3165 /* Immediately move the result of the libcall into a pseudo
3166 register so reload doesn't clobber the value if it needs
3167 the return register for a spill reg. */
3168 result
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
3169 result_mode
= TYPE_MODE (integer_type_node
);
3170 emit_move_insn (result
,
3171 hard_libcall_value (result_mode
));
3175 *pmode
= result_mode
;
3181 if (can_compare_p (*pcomparison
, mode
, purpose
))
3184 /* Handle a lib call just for the mode we are using. */
3186 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3188 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3191 /* If we want unsigned, and this mode has a distinct unsigned
3192 comparison routine, use that. */
3193 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3194 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3196 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
,
3199 /* Immediately move the result of the libcall into a pseudo
3200 register so reload doesn't clobber the value if it needs
3201 the return register for a spill reg. */
3202 result
= gen_reg_rtx (word_mode
);
3203 emit_move_insn (result
, hard_libcall_value (word_mode
));
3205 /* Integer comparison returns a result that must be compared against 1,
3206 so that even if we do an unsigned compare afterward,
3207 there is still a value that can represent the result "less than". */
3214 if (class == MODE_FLOAT
)
3215 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3221 /* Before emitting an insn with code ICODE, make sure that X, which is going
3222 to be used for operand OPNUM of the insn, is converted from mode MODE to
3223 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3224 that it is accepted by the operand predicate. Return the new value. */
3227 prepare_operand (icode
, x
, opnum
, mode
, wider_mode
, unsignedp
)
3231 enum machine_mode mode
, wider_mode
;
3234 x
= protect_from_queue (x
, 0);
3236 if (mode
!= wider_mode
)
3237 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3239 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3240 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3241 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3245 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3246 we can do the comparison.
3247 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3248 be NULL_RTX which indicates that only a comparison is to be generated. */
3251 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3253 enum machine_mode mode
;
3254 enum rtx_code comparison
;
3258 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3259 enum mode_class
class = GET_MODE_CLASS (mode
);
3260 enum machine_mode wider_mode
= mode
;
3262 /* Try combined insns first. */
3265 enum insn_code icode
;
3266 PUT_MODE (test
, wider_mode
);
3270 icode
= cbranch_optab
->handlers
[(int)wider_mode
].insn_code
;
3272 if (icode
!= CODE_FOR_nothing
3273 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3275 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3276 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3277 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3282 /* Handle some compares against zero. */
3283 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3284 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3286 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3287 emit_insn (GEN_FCN (icode
) (x
));
3289 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3293 /* Handle compares for which there is a directly suitable insn. */
3295 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3296 if (icode
!= CODE_FOR_nothing
)
3298 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3299 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3300 emit_insn (GEN_FCN (icode
) (x
, y
));
3302 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3306 if (class != MODE_INT
&& class != MODE_FLOAT
3307 && class != MODE_COMPLEX_FLOAT
)
3310 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3311 } while (wider_mode
!= VOIDmode
);
3316 /* Generate code to compare X with Y so that the condition codes are
3317 set and to jump to LABEL if the condition is true. If X is a
3318 constant and Y is not a constant, then the comparison is swapped to
3319 ensure that the comparison RTL has the canonical form.
3321 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3322 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3323 the proper branch condition code.
3325 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3327 MODE is the mode of the inputs (in case they are const_int).
3329 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3330 be passed unchanged to emit_cmp_insn, then potentially converted into an
3331 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3334 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3336 enum rtx_code comparison
;
3338 enum machine_mode mode
;
3342 rtx op0
= x
, op1
= y
;
3344 /* Swap operands and condition to ensure canonical RTL. */
3345 if (swap_commutative_operands_p (x
, y
))
3347 /* If we're not emitting a branch, this means some caller
3353 comparison
= swap_condition (comparison
);
3357 /* If OP0 is still a constant, then both X and Y must be constants. Force
3358 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3360 if (CONSTANT_P (op0
))
3361 op0
= force_reg (mode
, op0
);
3366 comparison
= unsigned_condition (comparison
);
3368 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3370 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3373 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3376 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3378 enum rtx_code comparison
;
3380 enum machine_mode mode
;
3383 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3386 /* Emit a library call comparison between floating point X and Y.
3387 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3390 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3392 enum rtx_code
*pcomparison
;
3393 enum machine_mode
*pmode
;
3396 enum rtx_code comparison
= *pcomparison
;
3397 rtx x
= *px
= protect_from_queue (*px
, 0);
3398 rtx y
= *py
= protect_from_queue (*py
, 0);
3399 enum machine_mode mode
= GET_MODE (x
);
3407 libfunc
= eqhf2_libfunc
;
3411 libfunc
= nehf2_libfunc
;
3415 libfunc
= gthf2_libfunc
;
3419 libfunc
= gehf2_libfunc
;
3423 libfunc
= lthf2_libfunc
;
3427 libfunc
= lehf2_libfunc
;
3431 libfunc
= unordhf2_libfunc
;
3437 else if (mode
== SFmode
)
3441 libfunc
= eqsf2_libfunc
;
3445 libfunc
= nesf2_libfunc
;
3449 libfunc
= gtsf2_libfunc
;
3453 libfunc
= gesf2_libfunc
;
3457 libfunc
= ltsf2_libfunc
;
3461 libfunc
= lesf2_libfunc
;
3465 libfunc
= unordsf2_libfunc
;
3471 else if (mode
== DFmode
)
3475 libfunc
= eqdf2_libfunc
;
3479 libfunc
= nedf2_libfunc
;
3483 libfunc
= gtdf2_libfunc
;
3487 libfunc
= gedf2_libfunc
;
3491 libfunc
= ltdf2_libfunc
;
3495 libfunc
= ledf2_libfunc
;
3499 libfunc
= unorddf2_libfunc
;
3505 else if (mode
== XFmode
)
3509 libfunc
= eqxf2_libfunc
;
3513 libfunc
= nexf2_libfunc
;
3517 libfunc
= gtxf2_libfunc
;
3521 libfunc
= gexf2_libfunc
;
3525 libfunc
= ltxf2_libfunc
;
3529 libfunc
= lexf2_libfunc
;
3533 libfunc
= unordxf2_libfunc
;
3539 else if (mode
== TFmode
)
3543 libfunc
= eqtf2_libfunc
;
3547 libfunc
= netf2_libfunc
;
3551 libfunc
= gttf2_libfunc
;
3555 libfunc
= getf2_libfunc
;
3559 libfunc
= lttf2_libfunc
;
3563 libfunc
= letf2_libfunc
;
3567 libfunc
= unordtf2_libfunc
;
3575 enum machine_mode wider_mode
;
3577 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3578 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3580 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3581 != CODE_FOR_nothing
)
3582 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3584 x
= protect_from_queue (x
, 0);
3585 y
= protect_from_queue (y
, 0);
3586 *px
= convert_to_mode (wider_mode
, x
, 0);
3587 *py
= convert_to_mode (wider_mode
, y
, 0);
3588 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3598 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
, y
,
3601 /* Immediately move the result of the libcall into a pseudo
3602 register so reload doesn't clobber the value if it needs
3603 the return register for a spill reg. */
3604 result
= gen_reg_rtx (word_mode
);
3605 emit_move_insn (result
, hard_libcall_value (word_mode
));
3609 if (comparison
== UNORDERED
)
3611 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
3612 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3618 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3621 emit_indirect_jump (loc
)
3624 if (! ((*insn_data
[(int)CODE_FOR_indirect_jump
].operand
[0].predicate
)
3626 loc
= copy_to_mode_reg (Pmode
, loc
);
3628 emit_jump_insn (gen_indirect_jump (loc
));
3632 #ifdef HAVE_conditional_move
3634 /* Emit a conditional move instruction if the machine supports one for that
3635 condition and machine mode.
3637 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3638 the mode to use should they be constants. If it is VOIDmode, they cannot
3641 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3642 should be stored there. MODE is the mode to use should they be constants.
3643 If it is VOIDmode, they cannot both be constants.
3645 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3646 is not supported. */
3649 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3654 enum machine_mode cmode
;
3656 enum machine_mode mode
;
3659 rtx tem
, subtarget
, comparison
, insn
;
3660 enum insn_code icode
;
3661 enum rtx_code reversed
;
3663 /* If one operand is constant, make it the second one. Only do this
3664 if the other operand is not constant as well. */
3666 if (swap_commutative_operands_p (op0
, op1
))
3671 code
= swap_condition (code
);
3674 /* get_condition will prefer to generate LT and GT even if the old
3675 comparison was against zero, so undo that canonicalization here since
3676 comparisons against zero are cheaper. */
3677 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
3678 code
= LE
, op1
= const0_rtx
;
3679 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
3680 code
= GE
, op1
= const0_rtx
;
3682 if (cmode
== VOIDmode
)
3683 cmode
= GET_MODE (op0
);
3685 if (swap_commutative_operands_p (op2
, op3
)
3686 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3695 if (mode
== VOIDmode
)
3696 mode
= GET_MODE (op2
);
3698 icode
= movcc_gen_code
[mode
];
3700 if (icode
== CODE_FOR_nothing
)
3705 op2
= force_not_mem (op2
);
3706 op3
= force_not_mem (op3
);
3710 target
= protect_from_queue (target
, 1);
3712 target
= gen_reg_rtx (mode
);
3718 op2
= protect_from_queue (op2
, 0);
3719 op3
= protect_from_queue (op3
, 0);
3721 /* If the insn doesn't accept these operands, put them in pseudos. */
3723 if (! (*insn_data
[icode
].operand
[0].predicate
)
3724 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3725 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3727 if (! (*insn_data
[icode
].operand
[2].predicate
)
3728 (op2
, insn_data
[icode
].operand
[2].mode
))
3729 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3731 if (! (*insn_data
[icode
].operand
[3].predicate
)
3732 (op3
, insn_data
[icode
].operand
[3].mode
))
3733 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3735 /* Everything should now be in the suitable form, so emit the compare insn
3736 and then the conditional move. */
3739 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3741 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3742 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3743 return NULL and let the caller figure out how best to deal with this
3745 if (GET_CODE (comparison
) != code
)
3748 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3750 /* If that failed, then give up. */
3756 if (subtarget
!= target
)
3757 convert_move (target
, subtarget
, 0);
3762 /* Return non-zero if a conditional move of mode MODE is supported.
3764 This function is for combine so it can tell whether an insn that looks
3765 like a conditional move is actually supported by the hardware. If we
3766 guess wrong we lose a bit on optimization, but that's it. */
3767 /* ??? sparc64 supports conditionally moving integers values based on fp
3768 comparisons, and vice versa. How do we handle them? */
3771 can_conditionally_move_p (mode
)
3772 enum machine_mode mode
;
3774 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3780 #endif /* HAVE_conditional_move */
3782 /* These functions generate an insn body and return it
3783 rather than emitting the insn.
3785 They do not protect from queued increments,
3786 because they may be used 1) in protect_from_queue itself
3787 and 2) in other passes where there is no queue. */
3789 /* Generate and return an insn body to add Y to X. */
3792 gen_add2_insn (x
, y
)
3795 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3797 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3798 (x
, insn_data
[icode
].operand
[0].mode
))
3799 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3800 (x
, insn_data
[icode
].operand
[1].mode
))
3801 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3802 (y
, insn_data
[icode
].operand
[2].mode
)))
3805 return (GEN_FCN (icode
) (x
, x
, y
));
3808 /* Generate and return an insn body to add r1 and c,
3809 storing the result in r0. */
3811 gen_add3_insn (r0
, r1
, c
)
3814 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3816 if (icode
== CODE_FOR_nothing
3817 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3818 (r0
, insn_data
[icode
].operand
[0].mode
))
3819 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3820 (r1
, insn_data
[icode
].operand
[1].mode
))
3821 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3822 (c
, insn_data
[icode
].operand
[2].mode
)))
3825 return (GEN_FCN (icode
) (r0
, r1
, c
));
3829 have_add2_insn (x
, y
)
3834 if (GET_MODE (x
) == VOIDmode
)
3837 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3839 if (icode
== CODE_FOR_nothing
)
3842 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3843 (x
, insn_data
[icode
].operand
[0].mode
))
3844 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3845 (x
, insn_data
[icode
].operand
[1].mode
))
3846 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3847 (y
, insn_data
[icode
].operand
[2].mode
)))
3853 /* Generate and return an insn body to subtract Y from X. */
3856 gen_sub2_insn (x
, y
)
3859 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3861 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3862 (x
, insn_data
[icode
].operand
[0].mode
))
3863 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3864 (x
, insn_data
[icode
].operand
[1].mode
))
3865 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3866 (y
, insn_data
[icode
].operand
[2].mode
)))
3869 return (GEN_FCN (icode
) (x
, x
, y
));
3872 /* Generate and return an insn body to subtract r1 and c,
3873 storing the result in r0. */
3875 gen_sub3_insn (r0
, r1
, c
)
3878 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3880 if (icode
== CODE_FOR_nothing
3881 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3882 (r0
, insn_data
[icode
].operand
[0].mode
))
3883 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3884 (r1
, insn_data
[icode
].operand
[1].mode
))
3885 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3886 (c
, insn_data
[icode
].operand
[2].mode
)))
3889 return (GEN_FCN (icode
) (r0
, r1
, c
));
3893 have_sub2_insn (x
, y
)
3898 if (GET_MODE (x
) == VOIDmode
)
3901 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3903 if (icode
== CODE_FOR_nothing
)
3906 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3907 (x
, insn_data
[icode
].operand
[0].mode
))
3908 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3909 (x
, insn_data
[icode
].operand
[1].mode
))
3910 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3911 (y
, insn_data
[icode
].operand
[2].mode
)))
3917 /* Generate the body of an instruction to copy Y into X.
3918 It may be a SEQUENCE, if one insn isn't enough. */
3921 gen_move_insn (x
, y
)
3924 enum machine_mode mode
= GET_MODE (x
);
3925 enum insn_code insn_code
;
3928 if (mode
== VOIDmode
)
3929 mode
= GET_MODE (y
);
3931 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3933 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3934 find a mode to do it in. If we have a movcc, use it. Otherwise,
3935 find the MODE_INT mode of the same width. */
3937 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3939 enum machine_mode tmode
= VOIDmode
;
3943 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3946 for (tmode
= QImode
; tmode
!= VOIDmode
;
3947 tmode
= GET_MODE_WIDER_MODE (tmode
))
3948 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3951 if (tmode
== VOIDmode
)
3954 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3955 may call change_address which is not appropriate if we were
3956 called when a reload was in progress. We don't have to worry
3957 about changing the address since the size in bytes is supposed to
3958 be the same. Copy the MEM to change the mode and move any
3959 substitutions from the old MEM to the new one. */
3961 if (reload_in_progress
)
3963 x
= gen_lowpart_common (tmode
, x1
);
3964 if (x
== 0 && GET_CODE (x1
) == MEM
)
3966 x
= adjust_address_nv (x1
, tmode
, 0);
3967 copy_replacements (x1
, x
);
3970 y
= gen_lowpart_common (tmode
, y1
);
3971 if (y
== 0 && GET_CODE (y1
) == MEM
)
3973 y
= adjust_address_nv (y1
, tmode
, 0);
3974 copy_replacements (y1
, y
);
3979 x
= gen_lowpart (tmode
, x
);
3980 y
= gen_lowpart (tmode
, y
);
3983 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3984 return (GEN_FCN (insn_code
) (x
, y
));
3988 emit_move_insn_1 (x
, y
);
3989 seq
= gen_sequence ();
3994 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3995 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3996 no such operation exists, CODE_FOR_nothing will be returned. */
3999 can_extend_p (to_mode
, from_mode
, unsignedp
)
4000 enum machine_mode to_mode
, from_mode
;
4003 #ifdef HAVE_ptr_extend
4005 return CODE_FOR_ptr_extend
;
4008 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
!= 0];
4011 /* Generate the body of an insn to extend Y (with mode MFROM)
4012 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4015 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
4017 enum machine_mode mto
, mfrom
;
4020 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
!= 0]) (x
, y
));
4023 /* can_fix_p and can_float_p say whether the target machine
4024 can directly convert a given fixed point type to
4025 a given floating point type, or vice versa.
4026 The returned value is the CODE_FOR_... value to use,
4027 or CODE_FOR_nothing if these modes cannot be directly converted.
4029 *TRUNCP_PTR is set to 1 if it is necessary to output
4030 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4032 static enum insn_code
4033 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
4034 enum machine_mode fltmode
, fixmode
;
4039 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0]
4040 != CODE_FOR_nothing
)
4041 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4043 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
4046 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4048 return CODE_FOR_nothing
;
4051 static enum insn_code
4052 can_float_p (fltmode
, fixmode
, unsignedp
)
4053 enum machine_mode fixmode
, fltmode
;
4056 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4059 /* Generate code to convert FROM to floating point
4060 and store in TO. FROM must be fixed point and not VOIDmode.
4061 UNSIGNEDP nonzero means regard FROM as unsigned.
4062 Normally this is done by correcting the final value
4063 if it is negative. */
4066 expand_float (to
, from
, unsignedp
)
4070 enum insn_code icode
;
4072 enum machine_mode fmode
, imode
;
4074 /* Crash now, because we won't be able to decide which mode to use. */
4075 if (GET_MODE (from
) == VOIDmode
)
4078 /* Look for an insn to do the conversion. Do it in the specified
4079 modes if possible; otherwise convert either input, output or both to
4080 wider mode. If the integer mode is wider than the mode of FROM,
4081 we can do the conversion signed even if the input is unsigned. */
4083 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4084 imode
= GET_MODE_WIDER_MODE (imode
))
4085 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4086 fmode
= GET_MODE_WIDER_MODE (fmode
))
4088 int doing_unsigned
= unsignedp
;
4090 if (fmode
!= GET_MODE (to
)
4091 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4094 icode
= can_float_p (fmode
, imode
, unsignedp
);
4095 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4096 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4098 if (icode
!= CODE_FOR_nothing
)
4100 to
= protect_from_queue (to
, 1);
4101 from
= protect_from_queue (from
, 0);
4103 if (imode
!= GET_MODE (from
))
4104 from
= convert_to_mode (imode
, from
, unsignedp
);
4106 if (fmode
!= GET_MODE (to
))
4107 target
= gen_reg_rtx (fmode
);
4109 emit_unop_insn (icode
, target
, from
,
4110 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4113 convert_move (to
, target
, 0);
4118 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4120 /* Unsigned integer, and no way to convert directly.
4121 Convert as signed, then conditionally adjust the result. */
4124 rtx label
= gen_label_rtx ();
4126 REAL_VALUE_TYPE offset
;
4130 to
= protect_from_queue (to
, 1);
4131 from
= protect_from_queue (from
, 0);
4134 from
= force_not_mem (from
);
4136 /* Look for a usable floating mode FMODE wider than the source and at
4137 least as wide as the target. Using FMODE will avoid rounding woes
4138 with unsigned values greater than the signed maximum value. */
4140 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4141 fmode
= GET_MODE_WIDER_MODE (fmode
))
4142 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4143 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4146 if (fmode
== VOIDmode
)
4148 /* There is no such mode. Pretend the target is wide enough. */
4149 fmode
= GET_MODE (to
);
4151 /* Avoid double-rounding when TO is narrower than FROM. */
4152 if ((significand_size (fmode
) + 1)
4153 < GET_MODE_BITSIZE (GET_MODE (from
)))
4156 rtx neglabel
= gen_label_rtx ();
4158 /* Don't use TARGET if it isn't a register, is a hard register,
4159 or is the wrong mode. */
4160 if (GET_CODE (target
) != REG
4161 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4162 || GET_MODE (target
) != fmode
)
4163 target
= gen_reg_rtx (fmode
);
4165 imode
= GET_MODE (from
);
4166 do_pending_stack_adjust ();
4168 /* Test whether the sign bit is set. */
4169 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4172 /* The sign bit is not set. Convert as signed. */
4173 expand_float (target
, from
, 0);
4174 emit_jump_insn (gen_jump (label
));
4177 /* The sign bit is set.
4178 Convert to a usable (positive signed) value by shifting right
4179 one bit, while remembering if a nonzero bit was shifted
4180 out; i.e., compute (from & 1) | (from >> 1). */
4182 emit_label (neglabel
);
4183 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4184 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4185 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4187 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4189 expand_float (target
, temp
, 0);
4191 /* Multiply by 2 to undo the shift above. */
4192 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4193 target
, 0, OPTAB_LIB_WIDEN
);
4195 emit_move_insn (target
, temp
);
4197 do_pending_stack_adjust ();
4203 /* If we are about to do some arithmetic to correct for an
4204 unsigned operand, do it in a pseudo-register. */
4206 if (GET_MODE (to
) != fmode
4207 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4208 target
= gen_reg_rtx (fmode
);
4210 /* Convert as signed integer to floating. */
4211 expand_float (target
, from
, 0);
4213 /* If FROM is negative (and therefore TO is negative),
4214 correct its value by 2**bitwidth. */
4216 do_pending_stack_adjust ();
4217 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4220 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
4221 Rather than setting up a dconst_dot_5, let's hope SCO
4223 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
4224 temp
= expand_binop (fmode
, add_optab
, target
,
4225 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4226 target
, 0, OPTAB_LIB_WIDEN
);
4228 emit_move_insn (target
, temp
);
4230 do_pending_stack_adjust ();
4236 /* No hardware instruction available; call a library routine to convert from
4237 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4243 to
= protect_from_queue (to
, 1);
4244 from
= protect_from_queue (from
, 0);
4246 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4247 from
= convert_to_mode (SImode
, from
, unsignedp
);
4250 from
= force_not_mem (from
);
4252 if (GET_MODE (to
) == SFmode
)
4254 if (GET_MODE (from
) == SImode
)
4255 libfcn
= floatsisf_libfunc
;
4256 else if (GET_MODE (from
) == DImode
)
4257 libfcn
= floatdisf_libfunc
;
4258 else if (GET_MODE (from
) == TImode
)
4259 libfcn
= floattisf_libfunc
;
4263 else if (GET_MODE (to
) == DFmode
)
4265 if (GET_MODE (from
) == SImode
)
4266 libfcn
= floatsidf_libfunc
;
4267 else if (GET_MODE (from
) == DImode
)
4268 libfcn
= floatdidf_libfunc
;
4269 else if (GET_MODE (from
) == TImode
)
4270 libfcn
= floattidf_libfunc
;
4274 else if (GET_MODE (to
) == XFmode
)
4276 if (GET_MODE (from
) == SImode
)
4277 libfcn
= floatsixf_libfunc
;
4278 else if (GET_MODE (from
) == DImode
)
4279 libfcn
= floatdixf_libfunc
;
4280 else if (GET_MODE (from
) == TImode
)
4281 libfcn
= floattixf_libfunc
;
4285 else if (GET_MODE (to
) == TFmode
)
4287 if (GET_MODE (from
) == SImode
)
4288 libfcn
= floatsitf_libfunc
;
4289 else if (GET_MODE (from
) == DImode
)
4290 libfcn
= floatditf_libfunc
;
4291 else if (GET_MODE (from
) == TImode
)
4292 libfcn
= floattitf_libfunc
;
4301 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4302 GET_MODE (to
), 1, from
,
4304 insns
= get_insns ();
4307 emit_libcall_block (insns
, target
, value
,
4308 gen_rtx_FLOAT (GET_MODE (to
), from
));
4313 /* Copy result to requested destination
4314 if we have been computing in a temp location. */
4318 if (GET_MODE (target
) == GET_MODE (to
))
4319 emit_move_insn (to
, target
);
4321 convert_move (to
, target
, 0);
4325 /* expand_fix: generate code to convert FROM to fixed point
4326 and store in TO. FROM must be floating point. */
4332 rtx temp
= gen_reg_rtx (GET_MODE (x
));
4333 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
4337 expand_fix (to
, from
, unsignedp
)
4341 enum insn_code icode
;
4343 enum machine_mode fmode
, imode
;
4347 /* We first try to find a pair of modes, one real and one integer, at
4348 least as wide as FROM and TO, respectively, in which we can open-code
4349 this conversion. If the integer mode is wider than the mode of TO,
4350 we can do the conversion either signed or unsigned. */
4352 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4353 imode
= GET_MODE_WIDER_MODE (imode
))
4354 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4355 fmode
= GET_MODE_WIDER_MODE (fmode
))
4357 int doing_unsigned
= unsignedp
;
4359 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4360 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4361 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4363 if (icode
!= CODE_FOR_nothing
)
4365 to
= protect_from_queue (to
, 1);
4366 from
= protect_from_queue (from
, 0);
4368 if (fmode
!= GET_MODE (from
))
4369 from
= convert_to_mode (fmode
, from
, 0);
4372 from
= ftruncify (from
);
4374 if (imode
!= GET_MODE (to
))
4375 target
= gen_reg_rtx (imode
);
4377 emit_unop_insn (icode
, target
, from
,
4378 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4380 convert_move (to
, target
, unsignedp
);
4385 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4386 /* For an unsigned conversion, there is one more way to do it.
4387 If we have a signed conversion, we generate code that compares
4388 the real value to the largest representable positive number. If if
4389 is smaller, the conversion is done normally. Otherwise, subtract
4390 one plus the highest signed number, convert, and add it back.
4392 We only need to check all real modes, since we know we didn't find
4393 anything with a wider integer mode. */
4395 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4396 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4397 fmode
= GET_MODE_WIDER_MODE (fmode
))
4398 /* Make sure we won't lose significant bits doing this. */
4399 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
4400 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4404 REAL_VALUE_TYPE offset
;
4405 rtx limit
, lab1
, lab2
, insn
;
4407 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4408 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
4409 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4410 lab1
= gen_label_rtx ();
4411 lab2
= gen_label_rtx ();
4414 to
= protect_from_queue (to
, 1);
4415 from
= protect_from_queue (from
, 0);
4418 from
= force_not_mem (from
);
4420 if (fmode
!= GET_MODE (from
))
4421 from
= convert_to_mode (fmode
, from
, 0);
4423 /* See if we need to do the subtraction. */
4424 do_pending_stack_adjust ();
4425 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4428 /* If not, do the signed "fix" and branch around fixup code. */
4429 expand_fix (to
, from
, 0);
4430 emit_jump_insn (gen_jump (lab2
));
4433 /* Otherwise, subtract 2**(N-1), convert to signed number,
4434 then add 2**(N-1). Do the addition using XOR since this
4435 will often generate better code. */
4437 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4438 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4439 expand_fix (to
, target
, 0);
4440 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4441 GEN_INT (trunc_int_for_mode
4442 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4444 to
, 1, OPTAB_LIB_WIDEN
);
4447 emit_move_insn (to
, target
);
4451 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4452 != CODE_FOR_nothing
)
4454 /* Make a place for a REG_NOTE and add it. */
4455 insn
= emit_move_insn (to
, to
);
4456 set_unique_reg_note (insn
,
4458 gen_rtx_fmt_e (UNSIGNED_FIX
,
4467 /* We can't do it with an insn, so use a library call. But first ensure
4468 that the mode of TO is at least as wide as SImode, since those are the
4469 only library calls we know about. */
4471 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4473 target
= gen_reg_rtx (SImode
);
4475 expand_fix (target
, from
, unsignedp
);
4477 else if (GET_MODE (from
) == SFmode
)
4479 if (GET_MODE (to
) == SImode
)
4480 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
4481 else if (GET_MODE (to
) == DImode
)
4482 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
4483 else if (GET_MODE (to
) == TImode
)
4484 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
4488 else if (GET_MODE (from
) == DFmode
)
4490 if (GET_MODE (to
) == SImode
)
4491 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
4492 else if (GET_MODE (to
) == DImode
)
4493 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
4494 else if (GET_MODE (to
) == TImode
)
4495 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
4499 else if (GET_MODE (from
) == XFmode
)
4501 if (GET_MODE (to
) == SImode
)
4502 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
4503 else if (GET_MODE (to
) == DImode
)
4504 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
4505 else if (GET_MODE (to
) == TImode
)
4506 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
4510 else if (GET_MODE (from
) == TFmode
)
4512 if (GET_MODE (to
) == SImode
)
4513 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
4514 else if (GET_MODE (to
) == DImode
)
4515 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
4516 else if (GET_MODE (to
) == TImode
)
4517 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
4529 to
= protect_from_queue (to
, 1);
4530 from
= protect_from_queue (from
, 0);
4533 from
= force_not_mem (from
);
4537 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4538 GET_MODE (to
), 1, from
,
4540 insns
= get_insns ();
4543 emit_libcall_block (insns
, target
, value
,
4544 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4545 GET_MODE (to
), from
));
4550 if (GET_MODE (to
) == GET_MODE (target
))
4551 emit_move_insn (to
, target
);
4553 convert_move (to
, target
, 0);
4557 /* Report whether we have an instruction to perform the operation
4558 specified by CODE on operands of mode MODE. */
4560 have_insn_for (code
, mode
)
4562 enum machine_mode mode
;
4564 return (code_to_optab
[(int) code
] != 0
4565 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4566 != CODE_FOR_nothing
));
4569 /* Create a blank optab. */
4574 optab op
= (optab
) xmalloc (sizeof (struct optab
));
4575 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4577 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4578 op
->handlers
[i
].libfunc
= 0;
4584 /* Same, but fill in its code as CODE, and write it into the
4585 code_to_optab table. */
4590 optab op
= new_optab ();
4592 code_to_optab
[(int) code
] = op
;
4596 /* Same, but fill in its code as CODE, and do _not_ write it into
4597 the code_to_optab table. */
4602 optab op
= new_optab ();
4607 /* Initialize the libfunc fields of an entire group of entries in some
4608 optab. Each entry is set equal to a string consisting of a leading
4609 pair of underscores followed by a generic operation name followed by
4610 a mode name (downshifted to lower case) followed by a single character
4611 representing the number of operands for the given operation (which is
4612 usually one of the characters '2', '3', or '4').
4614 OPTABLE is the table in which libfunc fields are to be initialized.
4615 FIRST_MODE is the first machine mode index in the given optab to
4617 LAST_MODE is the last machine mode index in the given optab to
4619 OPNAME is the generic (string) name of the operation.
4620 SUFFIX is the character which specifies the number of operands for
4621 the given generic operation.
4625 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
4633 unsigned opname_len
= strlen (opname
);
4635 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4636 mode
= (enum machine_mode
) ((int) mode
+ 1))
4638 const char *mname
= GET_MODE_NAME(mode
);
4639 unsigned mname_len
= strlen (mname
);
4640 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4647 for (q
= opname
; *q
; )
4649 for (q
= mname
; *q
; q
++)
4650 *p
++ = TOLOWER (*q
);
4654 optable
->handlers
[(int) mode
].libfunc
4655 = gen_rtx_SYMBOL_REF (Pmode
, ggc_alloc_string (libfunc_name
,
4660 /* Initialize the libfunc fields of an entire group of entries in some
4661 optab which correspond to all integer mode operations. The parameters
4662 have the same meaning as similarly named ones for the `init_libfuncs'
4663 routine. (See above). */
4666 init_integral_libfuncs (optable
, opname
, suffix
)
4671 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
4674 /* Initialize the libfunc fields of an entire group of entries in some
4675 optab which correspond to all real mode operations. The parameters
4676 have the same meaning as similarly named ones for the `init_libfuncs'
4677 routine. (See above). */
4680 init_floating_libfuncs (optable
, opname
, suffix
)
4685 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
4689 init_one_libfunc (name
)
4692 /* Create a FUNCTION_DECL that can be passed to ENCODE_SECTION_INFO. */
4693 /* ??? We don't have any type information except for this is
4694 a function. Pretend this is "int foo()". */
4695 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4696 build_function_type (integer_type_node
, NULL_TREE
));
4697 DECL_ARTIFICIAL (decl
) = 1;
4698 DECL_EXTERNAL (decl
) = 1;
4699 TREE_PUBLIC (decl
) = 1;
4701 /* Return the symbol_ref from the mem rtx. */
4702 return XEXP (DECL_RTL (decl
), 0);
4705 /* Mark ARG (which is really an OPTAB *) for GC. */
4711 optab o
= *(optab
*) arg
;
4714 for (i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
4715 ggc_mark_rtx (o
->handlers
[i
].libfunc
);
4718 /* Call this once to initialize the contents of the optabs
4719 appropriately for the current target machine. */
4724 unsigned int i
, j
, k
;
4726 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4728 for (i
= 0; i
< ARRAY_SIZE (fixtab
); i
++)
4729 for (j
= 0; j
< ARRAY_SIZE (fixtab
[0]); j
++)
4730 for (k
= 0; k
< ARRAY_SIZE (fixtab
[0][0]); k
++)
4731 fixtab
[i
][j
][k
] = CODE_FOR_nothing
;
4733 for (i
= 0; i
< ARRAY_SIZE (fixtrunctab
); i
++)
4734 for (j
= 0; j
< ARRAY_SIZE (fixtrunctab
[0]); j
++)
4735 for (k
= 0; k
< ARRAY_SIZE (fixtrunctab
[0][0]); k
++)
4736 fixtrunctab
[i
][j
][k
] = CODE_FOR_nothing
;
4738 for (i
= 0; i
< ARRAY_SIZE (floattab
); i
++)
4739 for (j
= 0; j
< ARRAY_SIZE (floattab
[0]); j
++)
4740 for (k
= 0; k
< ARRAY_SIZE (floattab
[0][0]); k
++)
4741 floattab
[i
][j
][k
] = CODE_FOR_nothing
;
4743 for (i
= 0; i
< ARRAY_SIZE (extendtab
); i
++)
4744 for (j
= 0; j
< ARRAY_SIZE (extendtab
[0]); j
++)
4745 for (k
= 0; k
< ARRAY_SIZE (extendtab
[0][0]); k
++)
4746 extendtab
[i
][j
][k
] = CODE_FOR_nothing
;
4748 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4749 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4751 #ifdef HAVE_conditional_move
4752 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4753 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4756 add_optab
= init_optab (PLUS
);
4757 addv_optab
= init_optabv (PLUS
);
4758 sub_optab
= init_optab (MINUS
);
4759 subv_optab
= init_optabv (MINUS
);
4760 smul_optab
= init_optab (MULT
);
4761 smulv_optab
= init_optabv (MULT
);
4762 smul_highpart_optab
= init_optab (UNKNOWN
);
4763 umul_highpart_optab
= init_optab (UNKNOWN
);
4764 smul_widen_optab
= init_optab (UNKNOWN
);
4765 umul_widen_optab
= init_optab (UNKNOWN
);
4766 sdiv_optab
= init_optab (DIV
);
4767 sdivv_optab
= init_optabv (DIV
);
4768 sdivmod_optab
= init_optab (UNKNOWN
);
4769 udiv_optab
= init_optab (UDIV
);
4770 udivmod_optab
= init_optab (UNKNOWN
);
4771 smod_optab
= init_optab (MOD
);
4772 umod_optab
= init_optab (UMOD
);
4773 ftrunc_optab
= init_optab (UNKNOWN
);
4774 and_optab
= init_optab (AND
);
4775 ior_optab
= init_optab (IOR
);
4776 xor_optab
= init_optab (XOR
);
4777 ashl_optab
= init_optab (ASHIFT
);
4778 ashr_optab
= init_optab (ASHIFTRT
);
4779 lshr_optab
= init_optab (LSHIFTRT
);
4780 rotl_optab
= init_optab (ROTATE
);
4781 rotr_optab
= init_optab (ROTATERT
);
4782 smin_optab
= init_optab (SMIN
);
4783 smax_optab
= init_optab (SMAX
);
4784 umin_optab
= init_optab (UMIN
);
4785 umax_optab
= init_optab (UMAX
);
4787 /* These three have codes assigned exclusively for the sake of
4789 mov_optab
= init_optab (SET
);
4790 movstrict_optab
= init_optab (STRICT_LOW_PART
);
4791 cmp_optab
= init_optab (COMPARE
);
4793 ucmp_optab
= init_optab (UNKNOWN
);
4794 tst_optab
= init_optab (UNKNOWN
);
4795 neg_optab
= init_optab (NEG
);
4796 negv_optab
= init_optabv (NEG
);
4797 abs_optab
= init_optab (ABS
);
4798 absv_optab
= init_optabv (ABS
);
4799 one_cmpl_optab
= init_optab (NOT
);
4800 ffs_optab
= init_optab (FFS
);
4801 sqrt_optab
= init_optab (SQRT
);
4802 sin_optab
= init_optab (UNKNOWN
);
4803 cos_optab
= init_optab (UNKNOWN
);
4804 strlen_optab
= init_optab (UNKNOWN
);
4805 cbranch_optab
= init_optab (UNKNOWN
);
4806 cmov_optab
= init_optab (UNKNOWN
);
4807 cstore_optab
= init_optab (UNKNOWN
);
4808 push_optab
= init_optab (UNKNOWN
);
4810 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4812 movstr_optab
[i
] = CODE_FOR_nothing
;
4813 clrstr_optab
[i
] = CODE_FOR_nothing
;
4815 #ifdef HAVE_SECONDARY_RELOADS
4816 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4820 /* Fill in the optabs with the insns we support. */
4823 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
4824 /* This flag says the same insns that convert to a signed fixnum
4825 also convert validly to an unsigned one. */
4826 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4827 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4828 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
4831 /* Initialize the optabs with the names of the library functions. */
4832 init_integral_libfuncs (add_optab
, "add", '3');
4833 init_floating_libfuncs (add_optab
, "add", '3');
4834 init_integral_libfuncs (addv_optab
, "addv", '3');
4835 init_floating_libfuncs (addv_optab
, "add", '3');
4836 init_integral_libfuncs (sub_optab
, "sub", '3');
4837 init_floating_libfuncs (sub_optab
, "sub", '3');
4838 init_integral_libfuncs (subv_optab
, "subv", '3');
4839 init_floating_libfuncs (subv_optab
, "sub", '3');
4840 init_integral_libfuncs (smul_optab
, "mul", '3');
4841 init_floating_libfuncs (smul_optab
, "mul", '3');
4842 init_integral_libfuncs (smulv_optab
, "mulv", '3');
4843 init_floating_libfuncs (smulv_optab
, "mul", '3');
4844 init_integral_libfuncs (sdiv_optab
, "div", '3');
4845 init_floating_libfuncs (sdiv_optab
, "div", '3');
4846 init_integral_libfuncs (sdivv_optab
, "divv", '3');
4847 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4848 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4849 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4850 init_integral_libfuncs (smod_optab
, "mod", '3');
4851 init_integral_libfuncs (umod_optab
, "umod", '3');
4852 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4853 init_integral_libfuncs (and_optab
, "and", '3');
4854 init_integral_libfuncs (ior_optab
, "ior", '3');
4855 init_integral_libfuncs (xor_optab
, "xor", '3');
4856 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4857 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4858 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4859 init_integral_libfuncs (smin_optab
, "min", '3');
4860 init_floating_libfuncs (smin_optab
, "min", '3');
4861 init_integral_libfuncs (smax_optab
, "max", '3');
4862 init_floating_libfuncs (smax_optab
, "max", '3');
4863 init_integral_libfuncs (umin_optab
, "umin", '3');
4864 init_integral_libfuncs (umax_optab
, "umax", '3');
4865 init_integral_libfuncs (neg_optab
, "neg", '2');
4866 init_floating_libfuncs (neg_optab
, "neg", '2');
4867 init_integral_libfuncs (negv_optab
, "negv", '2');
4868 init_floating_libfuncs (negv_optab
, "neg", '2');
4869 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4870 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4872 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
4873 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4874 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4875 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4877 #ifdef MULSI3_LIBCALL
4878 smul_optab
->handlers
[(int) SImode
].libfunc
4879 = init_one_libfunc (MULSI3_LIBCALL
);
4881 #ifdef MULDI3_LIBCALL
4882 smul_optab
->handlers
[(int) DImode
].libfunc
4883 = init_one_libfunc (MULDI3_LIBCALL
);
4886 #ifdef DIVSI3_LIBCALL
4887 sdiv_optab
->handlers
[(int) SImode
].libfunc
4888 = init_one_libfunc (DIVSI3_LIBCALL
);
4890 #ifdef DIVDI3_LIBCALL
4891 sdiv_optab
->handlers
[(int) DImode
].libfunc
4892 = init_one_libfunc (DIVDI3_LIBCALL
);
4895 #ifdef UDIVSI3_LIBCALL
4896 udiv_optab
->handlers
[(int) SImode
].libfunc
4897 = init_one_libfunc (UDIVSI3_LIBCALL
);
4899 #ifdef UDIVDI3_LIBCALL
4900 udiv_optab
->handlers
[(int) DImode
].libfunc
4901 = init_one_libfunc (UDIVDI3_LIBCALL
);
4904 #ifdef MODSI3_LIBCALL
4905 smod_optab
->handlers
[(int) SImode
].libfunc
4906 = init_one_libfunc (MODSI3_LIBCALL
);
4908 #ifdef MODDI3_LIBCALL
4909 smod_optab
->handlers
[(int) DImode
].libfunc
4910 = init_one_libfunc (MODDI3_LIBCALL
);
4913 #ifdef UMODSI3_LIBCALL
4914 umod_optab
->handlers
[(int) SImode
].libfunc
4915 = init_one_libfunc (UMODSI3_LIBCALL
);
4917 #ifdef UMODDI3_LIBCALL
4918 umod_optab
->handlers
[(int) DImode
].libfunc
4919 = init_one_libfunc (UMODDI3_LIBCALL
);
4922 /* Use cabs for DC complex abs, since systems generally have cabs.
4923 Don't define any libcall for SCmode, so that cabs will be used. */
4924 abs_optab
->handlers
[(int) DCmode
].libfunc
4925 = init_one_libfunc ("cabs");
4927 /* The ffs function operates on `int'. */
4928 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
4929 = init_one_libfunc ("ffs");
4931 extendsfdf2_libfunc
= init_one_libfunc ("__extendsfdf2");
4932 extendsfxf2_libfunc
= init_one_libfunc ("__extendsfxf2");
4933 extendsftf2_libfunc
= init_one_libfunc ("__extendsftf2");
4934 extenddfxf2_libfunc
= init_one_libfunc ("__extenddfxf2");
4935 extenddftf2_libfunc
= init_one_libfunc ("__extenddftf2");
4937 truncdfsf2_libfunc
= init_one_libfunc ("__truncdfsf2");
4938 truncxfsf2_libfunc
= init_one_libfunc ("__truncxfsf2");
4939 trunctfsf2_libfunc
= init_one_libfunc ("__trunctfsf2");
4940 truncxfdf2_libfunc
= init_one_libfunc ("__truncxfdf2");
4941 trunctfdf2_libfunc
= init_one_libfunc ("__trunctfdf2");
4943 memcpy_libfunc
= init_one_libfunc ("memcpy");
4944 memmove_libfunc
= init_one_libfunc ("memmove");
4945 bcopy_libfunc
= init_one_libfunc ("bcopy");
4946 memcmp_libfunc
= init_one_libfunc ("memcmp");
4947 bcmp_libfunc
= init_one_libfunc ("__gcc_bcmp");
4948 memset_libfunc
= init_one_libfunc ("memset");
4949 bzero_libfunc
= init_one_libfunc ("bzero");
4951 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
4952 ? "_Unwind_SjLj_Resume"
4953 : "_Unwind_Resume");
4954 #ifndef DONT_USE_BUILTIN_SETJMP
4955 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
4956 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
4958 setjmp_libfunc
= init_one_libfunc ("setjmp");
4959 longjmp_libfunc
= init_one_libfunc ("longjmp");
4961 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
4962 unwind_sjlj_unregister_libfunc
4963 = init_one_libfunc ("_Unwind_SjLj_Unregister");
4965 eqhf2_libfunc
= init_one_libfunc ("__eqhf2");
4966 nehf2_libfunc
= init_one_libfunc ("__nehf2");
4967 gthf2_libfunc
= init_one_libfunc ("__gthf2");
4968 gehf2_libfunc
= init_one_libfunc ("__gehf2");
4969 lthf2_libfunc
= init_one_libfunc ("__lthf2");
4970 lehf2_libfunc
= init_one_libfunc ("__lehf2");
4971 unordhf2_libfunc
= init_one_libfunc ("__unordhf2");
4973 eqsf2_libfunc
= init_one_libfunc ("__eqsf2");
4974 nesf2_libfunc
= init_one_libfunc ("__nesf2");
4975 gtsf2_libfunc
= init_one_libfunc ("__gtsf2");
4976 gesf2_libfunc
= init_one_libfunc ("__gesf2");
4977 ltsf2_libfunc
= init_one_libfunc ("__ltsf2");
4978 lesf2_libfunc
= init_one_libfunc ("__lesf2");
4979 unordsf2_libfunc
= init_one_libfunc ("__unordsf2");
4981 eqdf2_libfunc
= init_one_libfunc ("__eqdf2");
4982 nedf2_libfunc
= init_one_libfunc ("__nedf2");
4983 gtdf2_libfunc
= init_one_libfunc ("__gtdf2");
4984 gedf2_libfunc
= init_one_libfunc ("__gedf2");
4985 ltdf2_libfunc
= init_one_libfunc ("__ltdf2");
4986 ledf2_libfunc
= init_one_libfunc ("__ledf2");
4987 unorddf2_libfunc
= init_one_libfunc ("__unorddf2");
4989 eqxf2_libfunc
= init_one_libfunc ("__eqxf2");
4990 nexf2_libfunc
= init_one_libfunc ("__nexf2");
4991 gtxf2_libfunc
= init_one_libfunc ("__gtxf2");
4992 gexf2_libfunc
= init_one_libfunc ("__gexf2");
4993 ltxf2_libfunc
= init_one_libfunc ("__ltxf2");
4994 lexf2_libfunc
= init_one_libfunc ("__lexf2");
4995 unordxf2_libfunc
= init_one_libfunc ("__unordxf2");
4997 eqtf2_libfunc
= init_one_libfunc ("__eqtf2");
4998 netf2_libfunc
= init_one_libfunc ("__netf2");
4999 gttf2_libfunc
= init_one_libfunc ("__gttf2");
5000 getf2_libfunc
= init_one_libfunc ("__getf2");
5001 lttf2_libfunc
= init_one_libfunc ("__lttf2");
5002 letf2_libfunc
= init_one_libfunc ("__letf2");
5003 unordtf2_libfunc
= init_one_libfunc ("__unordtf2");
5005 floatsisf_libfunc
= init_one_libfunc ("__floatsisf");
5006 floatdisf_libfunc
= init_one_libfunc ("__floatdisf");
5007 floattisf_libfunc
= init_one_libfunc ("__floattisf");
5009 floatsidf_libfunc
= init_one_libfunc ("__floatsidf");
5010 floatdidf_libfunc
= init_one_libfunc ("__floatdidf");
5011 floattidf_libfunc
= init_one_libfunc ("__floattidf");
5013 floatsixf_libfunc
= init_one_libfunc ("__floatsixf");
5014 floatdixf_libfunc
= init_one_libfunc ("__floatdixf");
5015 floattixf_libfunc
= init_one_libfunc ("__floattixf");
5017 floatsitf_libfunc
= init_one_libfunc ("__floatsitf");
5018 floatditf_libfunc
= init_one_libfunc ("__floatditf");
5019 floattitf_libfunc
= init_one_libfunc ("__floattitf");
5021 fixsfsi_libfunc
= init_one_libfunc ("__fixsfsi");
5022 fixsfdi_libfunc
= init_one_libfunc ("__fixsfdi");
5023 fixsfti_libfunc
= init_one_libfunc ("__fixsfti");
5025 fixdfsi_libfunc
= init_one_libfunc ("__fixdfsi");
5026 fixdfdi_libfunc
= init_one_libfunc ("__fixdfdi");
5027 fixdfti_libfunc
= init_one_libfunc ("__fixdfti");
5029 fixxfsi_libfunc
= init_one_libfunc ("__fixxfsi");
5030 fixxfdi_libfunc
= init_one_libfunc ("__fixxfdi");
5031 fixxfti_libfunc
= init_one_libfunc ("__fixxfti");
5033 fixtfsi_libfunc
= init_one_libfunc ("__fixtfsi");
5034 fixtfdi_libfunc
= init_one_libfunc ("__fixtfdi");
5035 fixtfti_libfunc
= init_one_libfunc ("__fixtfti");
5037 fixunssfsi_libfunc
= init_one_libfunc ("__fixunssfsi");
5038 fixunssfdi_libfunc
= init_one_libfunc ("__fixunssfdi");
5039 fixunssfti_libfunc
= init_one_libfunc ("__fixunssfti");
5041 fixunsdfsi_libfunc
= init_one_libfunc ("__fixunsdfsi");
5042 fixunsdfdi_libfunc
= init_one_libfunc ("__fixunsdfdi");
5043 fixunsdfti_libfunc
= init_one_libfunc ("__fixunsdfti");
5045 fixunsxfsi_libfunc
= init_one_libfunc ("__fixunsxfsi");
5046 fixunsxfdi_libfunc
= init_one_libfunc ("__fixunsxfdi");
5047 fixunsxfti_libfunc
= init_one_libfunc ("__fixunsxfti");
5049 fixunstfsi_libfunc
= init_one_libfunc ("__fixunstfsi");
5050 fixunstfdi_libfunc
= init_one_libfunc ("__fixunstfdi");
5051 fixunstfti_libfunc
= init_one_libfunc ("__fixunstfti");
5053 /* For function entry/exit instrumentation. */
5054 profile_function_entry_libfunc
5055 = init_one_libfunc ("__cyg_profile_func_enter");
5056 profile_function_exit_libfunc
5057 = init_one_libfunc ("__cyg_profile_func_exit");
5059 #ifdef HAVE_conditional_trap
5063 #ifdef INIT_TARGET_OPTABS
5064 /* Allow the target to add more libcalls or rename some, etc. */
5068 /* Add these GC roots. */
5069 ggc_add_root (optab_table
, OTI_MAX
, sizeof(optab
), mark_optab
);
5070 ggc_add_rtx_root (libfunc_table
, LTI_MAX
);
#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are
   ignored.  */
static rtx trap_rtx;
5083 if (HAVE_conditional_trap
)
5085 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5086 ggc_add_rtx_root (&trap_rtx
, 1);
5091 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5092 CODE. Return 0 on failure. */
5095 gen_cond_trap (code
, op1
, op2
, tcode
)
5096 enum rtx_code code ATTRIBUTE_UNUSED
;
5097 rtx op1
, op2 ATTRIBUTE_UNUSED
, tcode ATTRIBUTE_UNUSED
;
5099 enum machine_mode mode
= GET_MODE (op1
);
5101 if (mode
== VOIDmode
)
5104 #ifdef HAVE_conditional_trap
5105 if (HAVE_conditional_trap
5106 && cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
5110 emit_insn (GEN_FCN (cmp_optab
->handlers
[(int) mode
].insn_code
) (op1
, op2
));
5111 PUT_CODE (trap_rtx
, code
);
5112 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5116 insn
= gen_sequence ();