/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];
/* Tables of patterns for extending one integer mode to another.  */
enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];

/* Tables of patterns for converting between fixed and floating point.  */
enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
static int add_equal_note PARAMS ((rtx, rtx, enum rtx_code, rtx, rtx));
static rtx widen_operand PARAMS ((rtx, enum machine_mode,
                                  enum machine_mode, int, int));
static int expand_cmplxdiv_straight PARAMS ((rtx, rtx, rtx, rtx,
                                             rtx, rtx, enum machine_mode,
                                             int, enum optab_methods,
                                             enum mode_class, optab));
static int expand_cmplxdiv_wide PARAMS ((rtx, rtx, rtx, rtx,
                                         rtx, rtx, enum machine_mode,
                                         int, enum optab_methods,
                                         enum mode_class, optab));
static void prepare_cmp_insn PARAMS ((rtx *, rtx *, enum rtx_code *, rtx,
                                      enum machine_mode *, int *,
                                      enum can_compare_purpose));
static enum insn_code can_fix_p PARAMS ((enum machine_mode, enum machine_mode,
                                         int, int *));
static enum insn_code can_float_p PARAMS ((enum machine_mode,
                                           enum machine_mode, int));
static rtx ftruncify PARAMS ((rtx));
static optab new_optab PARAMS ((void));
static inline optab init_optab PARAMS ((enum rtx_code));
static inline optab init_optabv PARAMS ((enum rtx_code));
static void init_libfuncs PARAMS ((optab, int, int, const char *, int));
static void init_integral_libfuncs PARAMS ((optab, const char *, int));
static void init_floating_libfuncs PARAMS ((optab, const char *, int));
#ifdef HAVE_conditional_trap
static void init_traps PARAMS ((void));
#endif
static void emit_cmp_and_jump_insn_1 PARAMS ((rtx, rtx, enum machine_mode,
                                              enum rtx_code, int, rtx));
static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *,
                                           enum machine_mode *, int *));
/* Add a REG_EQUAL note to the last insn in SEQ.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
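
/* As a concrete illustration (an editorial example, not taken from a
   particular caller): if SEQ computes TARGET = OP0 + OP1 in SImode through
   several intermediate insns, the note attached to the last insn is simply
   (plus:SI OP0 OP1), built via gen_rtx_fmt_ee (PLUS, SImode, copy_rtx (op0),
   copy_rtx (op1)), so later passes can treat the whole sequence as that one
   operation.  */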
add_equal_note (seq, target, code, op0, op1)

  if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
       && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
      || GET_CODE (seq) != SEQUENCE
      || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0
      || GET_CODE (target) == ZERO_EXTRACT
      || (! rtx_equal_p (SET_DEST (set), target)
          /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
             STRICT_LOW_PART.  */
          && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
              || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    for (i = XVECLEN (seq, 0) - 2; i >= 0; i--)
      if (reg_set_p (target, XVECEXP (seq, 0, i)))

  if (GET_RTX_CLASS (code) == '1')
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1), REG_EQUAL, note);
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
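
/* For instance (editorial note), when an SImode AND has to be carried out
   in DImode because only the wider mode is supported, the high bits of each
   input are dead in the truncated result, so a nonzero NO_EXTEND lets this
   routine hand back a cheap paradoxical SUBREG instead of emitting a real
   zero- or sign-extension.  */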
widen_operand (op, mode, oldmode, unsignedp, no_extend)
     enum machine_mode mode, oldmode;

  /* If we must extend do so.  If OP is either a constant or a SUBREG
     for a promoted object, also extend since it will be more efficient to
     do it in the wider mode.  */
      || GET_MODE (op) == VOIDmode
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG of OP.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
/* Generate code to perform a straightforward complex divide.  */
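
/* That is, for (a+ib)/(c+id) with t = c*c + d*d, the quotient is formed
   directly as ((a*c + b*d)/t) + i((b*c - a*d)/t), the same textbook formula
   quoted later in this file for the complex-divide case.  It is cheap but
   can overflow or underflow when c*c or d*d leaves the representable range,
   which is what the "wide" variant below is designed to avoid.  */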
expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode,
                          unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     enum optab_methods methods;
     enum mode_class class;

  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
  imag0 = force_reg (submode, imag0);
  imag1 = force_reg (submode, imag1);

  /* Divisor: c*c + d*d.  */
  temp1 = expand_binop (submode, this_mul_optab, real1, real1,
                        NULL_RTX, unsignedp, methods);
  temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
                        NULL_RTX, unsignedp, methods);

  if (temp1 == 0 || temp2 == 0)

  divisor = expand_binop (submode, this_add_optab, temp1, temp2,
                          NULL_RTX, unsignedp, methods);

      /* Mathematically, ((a)(c-id))/divisor.  */
      /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)).  */

      /* Calculate the dividend.  */
      real_t = expand_binop (submode, this_mul_optab, real0, real1,
                             NULL_RTX, unsignedp, methods);
      imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
                            NULL_RTX, unsignedp);

      /* Mathematically, ((a+ib)(c-id))/divisor.  */
      /* Calculate the dividend.  */
      temp1 = expand_binop (submode, this_mul_optab, real0, real1,
                            NULL_RTX, unsignedp, methods);
      temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      real_t = expand_binop (submode, this_add_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
                            NULL_RTX, unsignedp, methods);
      temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

  if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

  emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

  emit_move_insn (imagr, res);
/* Generate code to perform a wide-input-range-acceptable complex divide.  */
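
/* The scaling used below (an editorial note: it is the scheme usually
   credited to R. L. Smith) never squares c or d directly.  When |c| >= |d|,
   let r = d/c; then

     (a+ib)/(c+id) = ((a + b*r) + i(b - a*r)) / (c + d*r)

   and symmetrically with r = c/d when |d| > |c|, so every intermediate value
   stays within a few factors of the operands' own magnitudes.  */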
expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode,
                      unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     enum optab_methods methods;
     enum mode_class class;

  rtx temp1, temp2, lab1, lab2;
  enum machine_mode mode;
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
  imag0 = force_reg (submode, imag0);
  imag1 = force_reg (submode, imag1);

  /* XXX What's an "unsigned" complex number?  */

  temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
  temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);

  if (temp1 == 0 || temp2 == 0)

  mode = GET_MODE (temp1);
  lab1 = gen_label_rtx ();
  emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
                           mode, unsignedp, lab1);

  /* |c| >= |d|; use ratio d/c to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, imag1, real1,
                          NULL_RTX, unsignedp, methods);
  else
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                           imag1, real1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
                        NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, real1,
                          NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)).  */

      imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
                            NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
         (a+b(d/c))/(c+d(d/c)) + i(b-a(d/c))/(c+d(d/c)).  */

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                            NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, real0,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                            NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

  emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

  emit_move_insn (imagr, res);

  lab2 = gen_label_rtx ();
  emit_jump_insn (gen_jump (lab2));

  /* |d| > |c|; use ratio c/d to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, real1, imag1,
                          NULL_RTX, unsignedp, methods);
  else
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                           real1, imag1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
                        NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, imag1,
                          NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d).  */

      real_t = expand_binop (submode, this_mul_optab, real0, ratio,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, real0,
                            NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
         (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d).  */

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                            NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, imag0,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                            NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

  emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
  else
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

  emit_move_insn (imagr, res);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
expand_simple_binop (mode, code, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum optab_methods methods;

  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
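
/* A minimal usage sketch (editorial illustration, not code from this file):
   an expander that wants X + Y in SImode, accepts any widening or library
   fallback, and has no preferred destination could write

     rtx sum = expand_simple_binop (SImode, PLUS, x, y, NULL_RTX,
                                    0, OPTAB_LIB_WIDEN);

   which reaches this routine through the code_to_optab table declared
   above.  */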
expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum optab_methods methods;

  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);
  target = protect_from_queue (target, 1);

  op0 = force_not_mem (op0);
  op1 = force_not_mem (op1);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == 'c'
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)

      if (((target == 0 || GET_CODE (target) == REG)
           ? ((GET_CODE (op1) == REG
               && GET_CODE (op0) != REG)
              : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */

      if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
          && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)

          tmp = op0; op0 = op1; op1 = tmp;
          tmp = xop0; xop0 = xop1; xop1 = tmp;

      /* In case the insn wants input operands in modes different from
         the result, convert the operands.  It would seem that we
         don't need to convert CONST_INTs, but we do, so that they're
         properly sign-extended for their modes; we choose the
         widest mode between mode and mode[01], so that, in a widening
         operation, we call convert_modes with different FROM and TO
         modes, which ensures the value is sign-extended.  Shift
         operations are an exception, because the second operand needs
         not be extended to the mode of the result.  */

      if (GET_MODE (op0) != mode0
          && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              : GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode0)

      if (GET_MODE (xop1) != mode1
          && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              : (GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode1)

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);

      /* If PAT is a multi-insn sequence, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call ourselves again, this time without a target.  */
      if (GET_CODE (pat) == SEQUENCE
          && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))

          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,

      delete_insns_since (last);

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))

      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))

        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))

            rtx xop0 = op0, xop1 = op1;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);

                if (class != MODE_INT)

                    target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);

                  return gen_lowpart (mode, temp);

              delete_insns_since (last);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)

          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)

          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* Synthesize double word shifts from single word shifts.  */
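  /* Worked example (editorial): with 32-bit words, a DImode left shift by 10
     (a count below BITS_PER_WORD) is assembled as

       into_word  = (high << 10) | (low >> 22);
       outof_word = low << 10;

     where (low >> 22) is the "carries" value computed below, while a left
     shift by 40 (a count of BITS_PER_WORD or more) simply stores low << 8 in
     the high word and zero-fills the low word (or fills it with sign copies
     for an arithmetic right shift of the mirrored form).  */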
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      rtx insns, inter, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = binoptab == ashl_optab;
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count >= BITS_PER_WORD)

          inter = expand_binop (word_mode, binoptab,
                                GEN_INT (shift_count - BITS_PER_WORD),
                                into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          /* For a signed right shift, we must fill the word we are shifting
             out of with copies of the sign bit.  Otherwise it is zeroed.  */
          if (inter != 0 && binoptab != ashr_optab)
            inter = CONST0_RTX (word_mode);

            inter = expand_binop (word_mode, binoptab,
                                  GEN_INT (BITS_PER_WORD - 1),
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

          optab reverse_unsigned_shift, unsigned_shift;

          /* For a shift of less than BITS_PER_WORD, to compute the carry,
             we must do a logical shift in the opposite direction of the
             desired shift.  */

          reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);

          /* For a shift of less than BITS_PER_WORD, to compute the word
             shifted towards, we need to unsigned shift the orig value of
             the word shifted from.  */

          unsigned_shift = (left_shift ? ashl_optab : lshr_optab);

          carries = expand_binop (word_mode, reverse_unsigned_shift,
                                  GEN_INT (BITS_PER_WORD - shift_count),
                                  0, unsignedp, next_methods);

          inter = expand_binop (word_mode, unsigned_shift, into_input,
                                op1, 0, unsignedp, next_methods);

          inter = expand_binop (word_mode, ior_optab, carries, inter,
                                into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          inter = expand_binop (word_mode, binoptab, outof_input,
                                op1, outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* Synthesize double word rotates from single word shifts.  */
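  /* Worked example (editorial): with 32-bit words, rotating a DImode value
     left by 10 yields

       high' = (high << 10) | (low >> 22);
       low'  = (low << 10) | (high >> 22);

     so each output word IORs together one shifted piece of each input word,
     which is exactly what the two temporaries per word below compute.  A
     rotate by exactly BITS_PER_WORD degenerates into a plain word swap.  */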
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)

          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);

          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)

              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);

              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     into_target, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      outof_target, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      /* We can't make this a no conflict block if this is a word swap,
         because the word swap case fails if the input and output values
         are in the same register.  */
      if (shift_count != BITS_PER_WORD)
        emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* These can be done a word at a time by propagating carries.  */
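  /* Worked example (editorial): a DImode addition on a 32-bit-word target
     with no doubleword add pattern comes down to

       low  = op0.low + op1.low;
       c1   = (low < op0.low);
       high = op0.high + op1.high + c1;

     and adding the carry-in to a word can itself produce a second carry; the
     loop below simply IORs the two possible carries together, since at most
     one of them can be set for any given word.  */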
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG_VALUE
         is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      if (target == 0 || GET_CODE (target) != REG
          || target == xop0 || target == xop1)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      if (GET_CODE (target) == REG)
        emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)

          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (target, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

          /* Store carry from main add/subtract.  */
          carry_out = gen_reg_rtx (word_mode);
          carry_out = emit_store_flag_force (carry_out,
                                             (binoptab == add_optab
                                             word_mode, 1, normalizep);

          /* Add/subtract previous carry to main result.  */
          newx = expand_binop (word_mode,
                               normalizep == 1 ? binoptab : otheroptab,
                               NULL_RTX, 1, next_methods);

          /* Get out carry from adding/subtracting carry in.  */
          rtx carry_tmp = gen_reg_rtx (word_mode);
          carry_tmp = emit_store_flag_force (carry_tmp,
                                             (binoptab == add_optab
                                             word_mode, 1, normalizep);

          /* Logical-ior the two possible carries together.  */
          carry_out = expand_binop (word_mode, ior_optab,
                                    carry_out, carry_tmp,
                                    carry_out, 0, next_methods);

          emit_move_insn (target_piece, newx);

          carry_in = carry_out;

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

              rtx temp = emit_move_insn (target, target);

              set_unique_reg_note (temp,
                                   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);

  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or 1.  */
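
  /* Numeric illustration (editorial), using 8-bit "words" for brevity: take
     op0_low = 0x90 and op1_low = 0x03.  The unsigned product is 144 * 3 = 432
     = 0x01B0, but a signed widening multiply sees -112 * 3 = -336 = 0xFEB0,
     which is short by exactly op1_low * 2**8 = 0x0300.  Adding op0_low >> 7
     (here 1) into op0_high before forming partial product (2b) feeds that
     missing op1_low back into the high word of the result.  */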
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))

      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);

      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && GET_CODE (target) != REG))

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */
            delete_insns_since (last);

            op0_xhigh = op0_high, op1_xhigh = op1_high;

               && smul_widen_optab->handlers[(int) mode].insn_code
                  != CODE_FOR_nothing)

          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);

            op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                      NULL_RTX, 0, next_methods);

              op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                        op0_xhigh, op0_xhigh, 0,

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);

            op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                      NULL_RTX, 0, next_methods);

              op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                        op1_xhigh, op1_xhigh, 0,

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)

          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

              temp = emit_move_insn (product, product);
              set_unique_reg_note (temp,
                                   gen_rtx_fmt_ee (MULT, mode,

      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         this sequence.  */

      delete_insns_since (last);
  /* We need to open-code the complex type operations: '+, -, * and /' */

  /* At this point we allow operations between two similar complex
     numbers, and also if one of the operands is not a complex number
     but rather of MODE_FLOAT or MODE_INT.  However, the caller
     must make sure that the MODE of the non-complex operand matches
     the SUBMODE of the complex operand.  */

  if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)

      rtx real0 = 0, imag0 = 0;
      rtx real1 = 0, imag1 = 0;
      rtx realr, imagr, res;

      /* Find the correct mode for the real and imaginary parts */
      enum machine_mode submode
        = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
                         class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,

      if (submode == BLKmode)

        target = gen_reg_rtx (mode);

      realr = gen_realpart (submode, target);
      imagr = gen_imagpart (submode, target);

      if (GET_MODE (op0) == mode)

          real0 = gen_realpart (submode, op0);
          imag0 = gen_imagpart (submode, op0);

      if (GET_MODE (op1) == mode)

          real1 = gen_realpart (submode, op1);
          imag1 = gen_imagpart (submode, op1);

      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))

      switch (binoptab->code)

          /* (a+ib) + (c+id) = (a+c) + i(b+d) */
          /* (a+ib) - (c+id) = (a-c) + i(b-d) */
          res = expand_binop (submode, binoptab, real0, real1,
                              realr, unsignedp, methods);

          else if (res != realr)
            emit_move_insn (realr, res);

            res = expand_binop (submode, binoptab, imag0, imag1,
                                imagr, unsignedp, methods);

          else if (binoptab->code == MINUS)
            res = expand_unop (submode,
                               binoptab == subv_optab ? negv_optab : neg_optab,
                               imag1, imagr, unsignedp);

          else if (res != imagr)
            emit_move_insn (imagr, res);

          /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */

          /* Don't fetch these from memory more than once.  */
          real0 = force_reg (submode, real0);
          real1 = force_reg (submode, real1);
          imag0 = force_reg (submode, imag0);
          imag1 = force_reg (submode, imag1);

          temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
                                unsignedp, methods);

          temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
                                unsignedp, methods);

          if (temp1 == 0 || temp2 == 0)

                               binoptab == smulv_optab ? subv_optab : sub_optab,
                               temp1, temp2, realr, unsignedp, methods));

          else if (res != realr)
            emit_move_insn (realr, res);

          temp1 = expand_binop (submode, binoptab, real0, imag1,
                                NULL_RTX, unsignedp, methods);

          temp2 = expand_binop (submode, binoptab, real1, imag0,
                                NULL_RTX, unsignedp, methods);

          if (temp1 == 0 || temp2 == 0)

                               binoptab == smulv_optab ? addv_optab : add_optab,
                               temp1, temp2, imagr, unsignedp, methods));

          else if (res != imagr)
            emit_move_insn (imagr, res);

          /* Don't fetch these from memory more than once.  */
          real0 = force_reg (submode, real0);
          real1 = force_reg (submode, real1);

          res = expand_binop (submode, binoptab, real0, real1,
                              realr, unsignedp, methods);

          else if (res != realr)
            emit_move_insn (realr, res);

            res = expand_binop (submode, binoptab,
                                real1, imag0, imagr, unsignedp, methods);

            res = expand_binop (submode, binoptab,
                                real0, imag1, imagr, unsignedp, methods);

          else if (res != imagr)
            emit_move_insn (imagr, res);

          /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */

              /* (a+ib) / (c+i0) = (a/c) + i(b/c) */

              /* Don't fetch these from memory more than once.  */
              real1 = force_reg (submode, real1);

              /* Simply divide the real and imaginary parts by `c' */
              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, real0, real1,
                                    realr, unsignedp, methods);
              else
                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     real0, real1, realr, unsignedp);

              else if (res != realr)
                emit_move_insn (realr, res);

              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, imag0, real1,
                                    imagr, unsignedp, methods);
              else
                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     imag0, real1, imagr, unsignedp);

              else if (res != imagr)
                emit_move_insn (imagr, res);

              switch (flag_complex_divide_method)

                  ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
                                                 realr, imagr, submode,

                  ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
                                             realr, imagr, submode,

      if (binoptab->code != UNKNOWN)
        equiv_value
          = gen_rtx_fmt_ee (binoptab->code, mode,
                            copy_rtx (op0), copy_rtx (op1));

      emit_no_conflict_block (seq, target, op0, op1, equiv_value);
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))

      enum machine_mode op1_mode = mode;

          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      delete_insns_since (last);
1781 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1782 || methods
== OPTAB_MUST_WIDEN
))
1784 /* Caller says, don't even try. */
1785 delete_insns_since (entry_last
);
1789 /* Compute the value of METHODS to pass to recursive calls.
1790 Don't allow widening to be tried recursively. */
1792 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1794 /* Look for a wider mode of the same class for which it appears we can do
1797 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1799 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1800 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1802 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1803 != CODE_FOR_nothing
)
1804 || (methods
== OPTAB_LIB
1805 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1807 rtx xop0
= op0
, xop1
= op1
;
1810 /* For certain integer operations, we need not actually extend
1811 the narrow operands, as long as we will truncate
1812 the results to the same narrowness. */
1814 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1815 || binoptab
== xor_optab
1816 || binoptab
== add_optab
|| binoptab
== sub_optab
1817 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1818 && class == MODE_INT
)
1821 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1822 unsignedp
, no_extend
);
1824 /* The second operand of a shift must always be extended. */
1825 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1826 no_extend
&& binoptab
!= ashl_optab
);
1828 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1829 unsignedp
, methods
);
1832 if (class != MODE_INT
)
1835 target
= gen_reg_rtx (mode
);
1836 convert_move (target
, temp
, 0);
1840 return gen_lowpart (mode
, temp
);
1843 delete_insns_since (last
);
1848 delete_insns_since (entry_last
);
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
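
/* For instance (editorial illustration), an expander needing an integer
   division could pass udiv_optab as UOPTAB and sdiv_optab as SOPTAB and let
   UNSIGNEDP choose between them; the widening fallback may then use the
   signed pattern even for the unsigned case, because zero-extended operands
   produce the same quotient either way.  */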
sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     optab uoptab, soptab;
     rtx op0, op1, target;
     enum optab_methods methods;

  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
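
/* A typical consumer (editorial example) is a divmod expansion, where a
   machine description pattern of the shape

     [(set (match_operand 0 ...) (div (match_operand 1 ...) (match_operand 2 ...)))
      (set (match_operand 3 ...) (mod (match_dup 1) (match_dup 2)))]

   yields both quotient and remainder from one call, TARG0 receiving the
   quotient and TARG1 the remainder, in the operand order described above.  */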
expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp)

  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

  op0 = force_not_mem (op0);
  op1 = force_not_mem (op1);

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

    targ0 = protect_from_queue (targ0, 1);
    targ0 = gen_reg_rtx (mode);

    targ1 = protect_from_queue (targ1, 1);
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* In case this insn wants input operands in modes different from the
         result, convert the operands.  */
      if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1)
        xop1 = convert_to_mode (mode1, xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))

          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)

              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,

                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);

              delete_insns_since (last);

  delete_insns_since (entry_last);
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
expand_simple_unop (mode, code, op0, target, unsignedp)
     enum machine_mode mode;

  optab unop = code_to_optab[(int) code];

  return expand_unop (mode, unop, op0, target, unsignedp);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
expand_unop (mode, unoptab, op0, target, unsignedp)
     enum machine_mode mode;

  enum mode_class class;
  enum machine_mode wider_mode;
  rtx last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);

  op0 = force_not_mem (op0);

  target = protect_from_queue (target, 1);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (GET_CODE (pat) == SEQUENCE
          && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))

          delete_insns_since (last);
          return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))

        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,

              if (class != MODE_INT)

                  target = gen_reg_rtx (mode);
                  convert_move (target, temp, 0);

                return gen_lowpart (mode, temp);

            delete_insns_since (last);

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)

          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,

  /* Open-code the complex negation operation.  */
  else if (unoptab->code == NEG
           && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))

      /* Find the correct mode for the real and imaginary parts */
      enum machine_mode submode
        = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
                         class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,

      if (submode == BLKmode)

        target = gen_reg_rtx (mode);

      target_piece = gen_imagpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_imagpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);

      target_piece = gen_realpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_realpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);

      emit_no_conflict_block (seq, target, op0, 0,
                              gen_rtx_fmt_e (unoptab->code, mode,

  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 1, op0, mode);
      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, mode, op0));

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))

          if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2283 != CODE_FOR_nothing
)
2284 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2288 /* For certain operations, we need not actually extend
2289 the narrow operand, as long as we will truncate the
2290 results to the same narrowness. */
2292 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2293 (unoptab
== neg_optab
2294 || unoptab
== one_cmpl_optab
)
2295 && class == MODE_INT
);
2297 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2302 if (class != MODE_INT
)
2305 target
= gen_reg_rtx (mode
);
2306 convert_move (target
, temp
, 0);
2310 return gen_lowpart (mode
, temp
);
2313 delete_insns_since (last
);
2318 /* If there is no negate operation, try doing a subtract from zero.
2319 The US Software GOFAST library needs this. */
2320 if (unoptab
->code
== NEG
)
2323 temp
= expand_binop (mode
,
2324 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2325 CONST0_RTX (mode
), op0
,
2326 target
, unsignedp
, OPTAB_LIB_WIDEN
);
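/* Illustrative sketch (not part of the original file): the subtract-from-zero
   fallback used just above when no negate pattern or libfunc exists, written
   as a stand-alone helper so the expand_binop call is easy to see.  */
#if 0
static rtx
example_neg_via_sub (rtx op0, enum machine_mode mode, rtx target)
{
  /* 0 - OP0 computed through expand_binop, widening or calling a library
     routine if needed, exactly as the GOFAST fallback above does.  */
  return expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
                       target, 0, OPTAB_LIB_WIDEN);
}
#endif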
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
2344 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2345 enum machine_mode mode
;
2348 int result_unsignedp
;
2354 result_unsignedp
= 1;
2356 /* First try to do it with a special abs instruction. */
2357 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2362 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2363 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2365 rtx last
= get_last_insn ();
2367 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2369 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2375 delete_insns_since (last
);
2378 /* If this machine has expensive jumps, we can do integer absolute
2379 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2380 where W is the width of MODE. */
2382 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2384 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2385 size_int (GET_MODE_BITSIZE (mode
) - 1),
2388 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2391 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2392 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2398 /* If that does not win, use conditional jump and negate. */
/* It is safe to use the target if it is the same
   as the source, provided it is also a pseudo register.  */
2402 if (op0
== target
&& GET_CODE (op0
) == REG
2403 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2406 op1
= gen_label_rtx ();
2407 if (target
== 0 || ! safe
2408 || GET_MODE (target
) != mode
2409 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2410 || (GET_CODE (target
) == REG
2411 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2412 target
= gen_reg_rtx (mode
);
2414 emit_move_insn (target
, op0
);
2417 /* If this mode is an integer too wide to compare properly,
2418 compare word by word. Rely on CSE to optimize constant cases. */
2419 if (GET_MODE_CLASS (mode
) == MODE_INT
2420 && ! can_compare_p (GE
, mode
, ccp_jump
))
2421 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2424 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2425 NULL_RTX
, NULL_RTX
, op1
);
2427 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2430 emit_move_insn (target
, op0
);
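/* Illustrative sketch (not from the original sources): the branch-free
   identity used above when BRANCH_COST >= 2, written out on a plain host
   integer so the transformation is easy to check by hand.  It assumes an
   arithmetic right shift, which is what the RTL-level code relies on.  */
#if 0
static int
example_abs_identity (int x)
{
  /* EXT is 0 for non-negative X and -1 for negative X.  */
  int ext = x >> (sizeof (int) * 8 - 1);
  return (ext ^ x) - ext;   /* Equals x when ext == 0, -x when ext == -1.  */
}
#endif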
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   UNSIGNEDP is relevant for complex integer modes.  */
2446 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2447 enum machine_mode mode
;
2452 enum mode_class
class = GET_MODE_CLASS (mode
);
2453 enum machine_mode wider_mode
;
2455 rtx entry_last
= get_last_insn ();
2458 optab this_abs_optab
;
2460 /* Find the correct mode for the real and imaginary parts. */
2461 enum machine_mode submode
2462 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2463 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2466 if (submode
== BLKmode
)
2469 op0
= protect_from_queue (op0
, 0);
2473 op0
= force_not_mem (op0
);
2476 last
= get_last_insn ();
2479 target
= protect_from_queue (target
, 1);
2481 this_abs_optab
= ! unsignedp
&& flag_trapv
2482 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2483 ? absv_optab
: abs_optab
;
2485 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2487 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2488 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2494 temp
= gen_reg_rtx (submode
);
2496 if (GET_MODE (xop0
) != VOIDmode
2497 && GET_MODE (xop0
) != mode0
)
2498 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2500 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2502 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2503 xop0
= copy_to_mode_reg (mode0
, xop0
);
2505 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2506 temp
= gen_reg_rtx (submode
);
2508 pat
= GEN_FCN (icode
) (temp
, xop0
);
2511 if (GET_CODE (pat
) == SEQUENCE
2512 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2515 delete_insns_since (last
);
2516 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2525 delete_insns_since (last
);
2528 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2530 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2531 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2533 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2534 != CODE_FOR_nothing
)
2538 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2539 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2543 if (class != MODE_COMPLEX_INT
)
2546 target
= gen_reg_rtx (submode
);
2547 convert_move (target
, temp
, 0);
2551 return gen_lowpart (submode
, temp
);
2554 delete_insns_since (last
);
2558 /* Open-code the complex absolute-value operation
2559 if we can open-code sqrt. Otherwise it's not worth while. */
2560 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
2563 rtx real
, imag
, total
;
2565 real
= gen_realpart (submode
, op0
);
2566 imag
= gen_imagpart (submode
, op0
);
2568 /* Square both parts. */
2569 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2570 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2572 /* Sum the parts. */
2573 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2574 0, OPTAB_LIB_WIDEN
);
2576 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2577 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2579 delete_insns_since (last
);
2584 /* Now try a library call in this mode. */
2585 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
2592 /* Pass 1 for NO_QUEUE so we don't lose any increments
2593 if the libcall is cse'd or moved. */
2594 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2595 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
2596 insns
= get_insns ();
2599 target
= gen_reg_rtx (submode
);
2600 emit_libcall_block (insns
, target
, value
,
2601 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
2606 /* It can't be done in this mode. Can we do it in a wider mode? */
2608 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2609 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2611 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2612 != CODE_FOR_nothing
)
2613 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2617 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2619 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2623 if (class != MODE_COMPLEX_INT
)
2626 target
= gen_reg_rtx (submode
);
2627 convert_move (target
, temp
, 0);
2631 return gen_lowpart (submode
, temp
);
2634 delete_insns_since (last
);
2638 delete_insns_since (entry_last
);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */
2649 emit_unop_insn (icode
, target
, op0
, code
)
2656 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2659 temp
= target
= protect_from_queue (target
, 1);
2661 op0
= protect_from_queue (op0
, 0);
/* Sign and zero extension from memory is often done specially on
   RISC machines, so forcing into a register here can pessimize code.  */
2666 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2667 op0
= force_not_mem (op0
);
2669 /* Now, if insn does not accept our operands, put them into pseudos. */
2671 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2672 op0
= copy_to_mode_reg (mode0
, op0
);
2674 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2675 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2676 temp
= gen_reg_rtx (GET_MODE (temp
));
2678 pat
= GEN_FCN (icode
) (temp
, op0
);
2680 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2681 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2686 emit_move_insn (target
, temp
);
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
2718 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2724 rtx prev
, next
, first
, last
, insn
;
2726 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2727 return emit_insns (insns
);
2729 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2730 if (GET_CODE (insn
) != INSN
2731 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2732 return emit_insns (insns
);
2734 /* First emit all insns that do not store into words of the output and remove
2735 these from the list. */
2736 for (insn
= insns
; insn
; insn
= next
)
2741 next
= NEXT_INSN (insn
);
2743 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2744 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2745 set
= PATTERN (insn
);
2746 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2748 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2749 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2751 set
= XVECEXP (PATTERN (insn
), 0, i
);
2759 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2761 if (PREV_INSN (insn
))
2762 NEXT_INSN (PREV_INSN (insn
)) = next
;
2767 PREV_INSN (next
) = PREV_INSN (insn
);
2773 prev
= get_last_insn ();
2775 /* Now write the CLOBBER of the output, followed by the setting of each
2776 of the words, followed by the final copy. */
2777 if (target
!= op0
&& target
!= op1
)
2778 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
2780 for (insn
= insns
; insn
; insn
= next
)
2782 next
= NEXT_INSN (insn
);
2785 if (op1
&& GET_CODE (op1
) == REG
)
2786 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
2789 if (op0
&& GET_CODE (op0
) == REG
)
2790 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
2794 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2795 != CODE_FOR_nothing
)
2797 last
= emit_move_insn (target
, target
);
2799 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
2803 last
= get_last_insn ();
2805 /* Remove any existing REG_EQUAL note from "last", or else it will
2806 be mistaken for a note referring to the full contents of the
2807 alleged libcall value when found together with the REG_RETVAL
2808 note added below. An existing note can come from an insn
2809 expansion at "last". */
2810 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2814 first
= get_insns ();
2816 first
= NEXT_INSN (prev
);
2818 /* Encapsulate the block so it gets manipulated as a unit. */
2819 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2821 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
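/* Illustrative sketch (not part of the original file): the calling pattern
   emit_no_conflict_block expects, mirroring the word-at-a-time one's
   complement code in expand_unop above.  TARGET and OP0 are assumed to be
   multi-word pseudos supplied by the caller.  */
#if 0
static void
example_no_conflict_use (rtx target, rtx op0, enum machine_mode mode)
{
  rtx insns;
  int i;

  start_sequence ();
  /* Set each word of TARGET from the complement of the matching word
     of OP0; these are the insns that later receive REG_NO_CONFLICT notes.  */
  for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
    {
      rtx piece = operand_subword (target, i, 1, mode);
      rtx x = expand_unop (word_mode, one_cmpl_optab,
                           operand_subword_force (op0, i, mode), piece, 1);
      if (piece != x)
        emit_move_insn (piece, x);
    }
  insns = get_insns ();
  end_sequence ();

  emit_no_conflict_block (insns, target, op0, NULL_RTX,
                          gen_rtx_fmt_e (NOT, mode, copy_rtx (op0)));
}
#endif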
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */
2852 emit_libcall_block (insns
, target
, result
, equiv
)
2858 rtx final_dest
= target
;
2859 rtx prev
, next
, first
, last
, insn
;
2861 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
2862 into a MEM later. Protect the libcall block from this change. */
2863 if (! REG_P (target
) || REG_USERVAR_P (target
))
2864 target
= gen_reg_rtx (GET_MODE (target
));
2866 /* If we're using non-call exceptions, a libcall corresponding to an
2867 operation that may trap may also trap. */
2868 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
2870 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2871 if (GET_CODE (insn
) == CALL_INSN
)
2873 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2875 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
2876 remove_note (insn
, note
);
/* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
   reg note to indicate that this call cannot throw or execute a nonlocal
   goto (unless there is already a REG_EH_REGION note, in which case
   we update it).  */
2884 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2885 if (GET_CODE (insn
) == CALL_INSN
)
2887 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
2890 XEXP (note
, 0) = GEN_INT (-1);
2892 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
2896 /* First emit all insns that set pseudos. Remove them from the list as
2897 we go. Avoid insns that set pseudos which were referenced in previous
2898 insns. These can be generated by move_by_pieces, for example,
2899 to update an address. Similarly, avoid insns that reference things
2900 set in previous insns. */
2902 for (insn
= insns
; insn
; insn
= next
)
2904 rtx set
= single_set (insn
);
2906 next
= NEXT_INSN (insn
);
2908 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2909 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2911 || ((! INSN_P(insns
)
2912 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
2913 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2914 && ! modified_in_p (SET_SRC (set
), insns
)
2915 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2917 if (PREV_INSN (insn
))
2918 NEXT_INSN (PREV_INSN (insn
)) = next
;
2923 PREV_INSN (next
) = PREV_INSN (insn
);
2929 prev
= get_last_insn ();
2931 /* Write the remaining insns followed by the final copy. */
2933 for (insn
= insns
; insn
; insn
= next
)
2935 next
= NEXT_INSN (insn
);
2940 last
= emit_move_insn (target
, result
);
2941 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2942 != CODE_FOR_nothing
)
2943 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
/* Remove any existing REG_EQUAL note from "last", or else it will
   be mistaken for a note referring to the full contents of the
   libcall value when found together with the REG_RETVAL note added
   below.  An existing note can come from an insn expansion at
   "last".  */
2951 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
2954 if (final_dest
!= target
)
2955 emit_move_insn (final_dest
, target
);
2958 first
= get_insns ();
2960 first
= NEXT_INSN (prev
);
2962 /* Encapsulate the block so it gets manipulated as a unit. */
2963 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
2965 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
2967 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
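/* Illustrative sketch (not part of the original file): the shape of a caller
   of emit_libcall_block, following the library-call fallback in expand_unop
   above.  LIBFUNC is assumed to be a valid libfunc rtx for a negation in
   MODE; the NEG equivalence recorded here is part of that assumption.  */
#if 0
static rtx
example_libcall_use (rtx libfunc, rtx op0, enum machine_mode mode)
{
  rtx insns, value, target;

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   mode, 1, op0, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (mode);
  /* The REG_EQUAL note records that TARGET is (neg OP0), so later passes
     can treat the whole block as a single operation.  */
  emit_libcall_block (insns, target, value,
                      gen_rtx_fmt_e (NEG, mode, op0));
  return target;
}
#endif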
/* Generate code to store zero in X.  */

void
emit_clr_insn (x)
     rtx x;
{
  emit_move_insn (x, const0_rtx);
}

/* Generate code to store 1 in X
   assuming it contains zero beforehand.  */

void
emit_0_to_1_insn (x)
     rtx x;
{
  emit_move_insn (x, const1_rtx);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */
int
can_compare_p (code, mode, purpose)
     enum rtx_code code;
     enum machine_mode mode;
     enum can_compare_purpose purpose;
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }

      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
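/* Illustrative sketch (not part of the original file): how a caller can
   consult can_compare_p before deciding between a direct comparison and a
   word-by-word fallback, as expand_abs does above.  */
#if 0
static int
example_needs_parts_compare (enum machine_mode mode)
{
  /* Nonzero when a GE jump cannot be emitted directly in MODE and the
     caller must fall back to do_jump_by_parts_greater_rtx.  */
  return (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump));
}
#endif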
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */
3051 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3053 enum rtx_code
*pcomparison
;
3055 enum machine_mode
*pmode
;
3057 enum can_compare_purpose purpose
;
3059 enum machine_mode mode
= *pmode
;
3060 rtx x
= *px
, y
= *py
;
3061 int unsignedp
= *punsignedp
;
3062 enum mode_class
class;
3064 class = GET_MODE_CLASS (mode
);
3066 /* They could both be VOIDmode if both args are immediate constants,
3067 but we should fold that at an earlier stage.
3068 With no special code here, this will call abort,
3069 reminding the programmer to implement such folding. */
3071 if (mode
!= BLKmode
&& flag_force_mem
)
3073 x
= force_not_mem (x
);
3074 y
= force_not_mem (y
);
3077 /* If we are inside an appropriately-short loop and one operand is an
3078 expensive constant, force it into a register. */
3079 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3080 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3081 x
= force_reg (mode
, x
);
3083 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3084 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3085 y
= force_reg (mode
, y
);
/* Abort if we have a non-canonical comparison.  The RTL documentation
   states that canonical comparisons are required only for targets which
   have cc0.  */
3091 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3095 /* Don't let both operands fail to indicate the mode. */
3096 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3097 x
= force_reg (mode
, x
);
3099 /* Handle all BLKmode compares. */
3101 if (mode
== BLKmode
)
3104 enum machine_mode result_mode
;
3105 rtx opalign ATTRIBUTE_UNUSED
3106 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3109 x
= protect_from_queue (x
, 0);
3110 y
= protect_from_queue (y
, 0);
3114 #ifdef HAVE_cmpstrqi
3116 && GET_CODE (size
) == CONST_INT
3117 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3119 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3120 result
= gen_reg_rtx (result_mode
);
3121 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3125 #ifdef HAVE_cmpstrhi
3127 && GET_CODE (size
) == CONST_INT
3128 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3130 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3131 result
= gen_reg_rtx (result_mode
);
3132 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3136 #ifdef HAVE_cmpstrsi
3139 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3140 result
= gen_reg_rtx (result_mode
);
3141 size
= protect_from_queue (size
, 0);
3142 emit_insn (gen_cmpstrsi (result
, x
, y
,
3143 convert_to_mode (SImode
, size
, 1),
3149 #ifdef TARGET_MEM_FUNCTIONS
3150 emit_library_call (memcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3151 TYPE_MODE (integer_type_node
), 3,
3152 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3153 convert_to_mode (TYPE_MODE (sizetype
), size
,
3154 TREE_UNSIGNED (sizetype
)),
3155 TYPE_MODE (sizetype
));
3157 emit_library_call (bcmp_libfunc
, LCT_PURE_MAKE_BLOCK
,
3158 TYPE_MODE (integer_type_node
), 3,
3159 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3160 convert_to_mode (TYPE_MODE (integer_type_node
),
3162 TREE_UNSIGNED (integer_type_node
)),
3163 TYPE_MODE (integer_type_node
));
3166 /* Immediately move the result of the libcall into a pseudo
3167 register so reload doesn't clobber the value if it needs
3168 the return register for a spill reg. */
3169 result
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
3170 result_mode
= TYPE_MODE (integer_type_node
);
3171 emit_move_insn (result
,
3172 hard_libcall_value (result_mode
));
3176 *pmode
= result_mode
;
3182 if (can_compare_p (*pcomparison
, mode
, purpose
))
3185 /* Handle a lib call just for the mode we are using. */
3187 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3189 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3192 /* If we want unsigned, and this mode has a distinct unsigned
3193 comparison routine, use that. */
3194 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3195 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3197 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
,
3200 /* Immediately move the result of the libcall into a pseudo
3201 register so reload doesn't clobber the value if it needs
3202 the return register for a spill reg. */
3203 result
= gen_reg_rtx (word_mode
);
3204 emit_move_insn (result
, hard_libcall_value (word_mode
));
3206 /* Integer comparison returns a result that must be compared against 1,
3207 so that even if we do an unsigned compare afterward,
3208 there is still a value that can represent the result "less than". */
3215 if (class == MODE_FLOAT
)
3216 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (icode, x, opnum, mode, wider_mode, unsignedp)
     int icode;
     rtx x;
     int opnum;
     enum machine_mode mode, wider_mode;
     int unsignedp;
{
  x = protect_from_queue (x, 0);

  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */
3252 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3254 enum machine_mode mode
;
3255 enum rtx_code comparison
;
3259 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3260 enum mode_class
class = GET_MODE_CLASS (mode
);
3261 enum machine_mode wider_mode
= mode
;
3263 /* Try combined insns first. */
3266 enum insn_code icode
;
3267 PUT_MODE (test
, wider_mode
);
3271 icode
= cbranch_optab
->handlers
[(int)wider_mode
].insn_code
;
3273 if (icode
!= CODE_FOR_nothing
3274 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3276 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3277 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3278 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3283 /* Handle some compares against zero. */
3284 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3285 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3287 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3288 emit_insn (GEN_FCN (icode
) (x
));
3290 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3294 /* Handle compares for which there is a directly suitable insn. */
3296 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3297 if (icode
!= CODE_FOR_nothing
)
3299 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3300 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3301 emit_insn (GEN_FCN (icode
) (x
, y
));
3303 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3307 if (class != MODE_INT
&& class != MODE_FLOAT
3308 && class != MODE_COMPLEX_FLOAT
)
3311 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3312 } while (wider_mode
!= VOIDmode
);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */
3335 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3337 enum rtx_code comparison
;
3339 enum machine_mode mode
;
3343 rtx op0
= x
, op1
= y
;
3345 /* Swap operands and condition to ensure canonical RTL. */
3346 if (swap_commutative_operands_p (x
, y
))
3348 /* If we're not emitting a branch, this means some caller
3354 comparison
= swap_condition (comparison
);
/* If OP0 is still a constant, then both X and Y must be constants.  Force
   X into a register to avoid aborting in emit_cmp_insn due to non-canonical
   RTL.  */
3361 if (CONSTANT_P (op0
))
3362 op0
= force_reg (mode
, op0
);
3367 comparison
= unsigned_condition (comparison
);
3369 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3371 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
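/* Illustrative sketch (not part of the original file): a typical call to
   emit_cmp_and_jump_insns, branching when OP is negative, in the style of
   the sign-bit test used by expand_float later in this file.  */
#if 0
static rtx
example_branch_if_negative (rtx op, enum machine_mode mode)
{
  rtx label = gen_label_rtx ();

  /* SIZE is NULL_RTX because MODE is not BLKmode; the 0 requests a signed
     comparison.  */
  emit_cmp_and_jump_insns (op, const0_rtx, LT, NULL_RTX, mode, 0, label);
  return label;   /* The caller emits the label where the branch lands.  */
}
#endif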
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */
3377 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3379 enum rtx_code comparison
;
3381 enum machine_mode mode
;
3384 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
3391 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3393 enum rtx_code
*pcomparison
;
3394 enum machine_mode
*pmode
;
3397 enum rtx_code comparison
= *pcomparison
;
3398 rtx x
= *px
= protect_from_queue (*px
, 0);
3399 rtx y
= *py
= protect_from_queue (*py
, 0);
3400 enum machine_mode mode
= GET_MODE (x
);
3408 libfunc
= eqhf2_libfunc
;
3412 libfunc
= nehf2_libfunc
;
3416 libfunc
= gthf2_libfunc
;
3420 libfunc
= gehf2_libfunc
;
3424 libfunc
= lthf2_libfunc
;
3428 libfunc
= lehf2_libfunc
;
3432 libfunc
= unordhf2_libfunc
;
3438 else if (mode
== SFmode
)
3442 libfunc
= eqsf2_libfunc
;
3446 libfunc
= nesf2_libfunc
;
3450 libfunc
= gtsf2_libfunc
;
3454 libfunc
= gesf2_libfunc
;
3458 libfunc
= ltsf2_libfunc
;
3462 libfunc
= lesf2_libfunc
;
3466 libfunc
= unordsf2_libfunc
;
3472 else if (mode
== DFmode
)
3476 libfunc
= eqdf2_libfunc
;
3480 libfunc
= nedf2_libfunc
;
3484 libfunc
= gtdf2_libfunc
;
3488 libfunc
= gedf2_libfunc
;
3492 libfunc
= ltdf2_libfunc
;
3496 libfunc
= ledf2_libfunc
;
3500 libfunc
= unorddf2_libfunc
;
3506 else if (mode
== XFmode
)
3510 libfunc
= eqxf2_libfunc
;
3514 libfunc
= nexf2_libfunc
;
3518 libfunc
= gtxf2_libfunc
;
3522 libfunc
= gexf2_libfunc
;
3526 libfunc
= ltxf2_libfunc
;
3530 libfunc
= lexf2_libfunc
;
3534 libfunc
= unordxf2_libfunc
;
3540 else if (mode
== TFmode
)
3544 libfunc
= eqtf2_libfunc
;
3548 libfunc
= netf2_libfunc
;
3552 libfunc
= gttf2_libfunc
;
3556 libfunc
= getf2_libfunc
;
3560 libfunc
= lttf2_libfunc
;
3564 libfunc
= letf2_libfunc
;
3568 libfunc
= unordtf2_libfunc
;
3576 enum machine_mode wider_mode
;
3578 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3579 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3581 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3582 != CODE_FOR_nothing
)
3583 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3585 x
= protect_from_queue (x
, 0);
3586 y
= protect_from_queue (y
, 0);
3587 *px
= convert_to_mode (wider_mode
, x
, 0);
3588 *py
= convert_to_mode (wider_mode
, y
, 0);
3589 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3599 emit_library_call (libfunc
, LCT_CONST_MAKE_BLOCK
, word_mode
, 2, x
, mode
, y
,
3602 /* Immediately move the result of the libcall into a pseudo
3603 register so reload doesn't clobber the value if it needs
3604 the return register for a spill reg. */
3605 result
= gen_reg_rtx (word_mode
);
3606 emit_move_insn (result
, hard_libcall_value (word_mode
));
3610 if (comparison
== UNORDERED
)
3612 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
3613 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (loc)
     rtx loc;
{
  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
         (loc, Pmode)))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
3650 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3655 enum machine_mode cmode
;
3657 enum machine_mode mode
;
3660 rtx tem
, subtarget
, comparison
, insn
;
3661 enum insn_code icode
;
3662 enum rtx_code reversed
;
3664 /* If one operand is constant, make it the second one. Only do this
3665 if the other operand is not constant as well. */
3667 if (swap_commutative_operands_p (op0
, op1
))
3672 code
= swap_condition (code
);
3675 /* get_condition will prefer to generate LT and GT even if the old
3676 comparison was against zero, so undo that canonicalization here since
3677 comparisons against zero are cheaper. */
3678 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
3679 code
= LE
, op1
= const0_rtx
;
3680 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
3681 code
= GE
, op1
= const0_rtx
;
3683 if (cmode
== VOIDmode
)
3684 cmode
= GET_MODE (op0
);
3686 if (swap_commutative_operands_p (op2
, op3
)
3687 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3696 if (mode
== VOIDmode
)
3697 mode
= GET_MODE (op2
);
3699 icode
= movcc_gen_code
[mode
];
3701 if (icode
== CODE_FOR_nothing
)
3706 op2
= force_not_mem (op2
);
3707 op3
= force_not_mem (op3
);
3711 target
= protect_from_queue (target
, 1);
3713 target
= gen_reg_rtx (mode
);
3719 op2
= protect_from_queue (op2
, 0);
3720 op3
= protect_from_queue (op3
, 0);
3722 /* If the insn doesn't accept these operands, put them in pseudos. */
3724 if (! (*insn_data
[icode
].operand
[0].predicate
)
3725 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3726 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3728 if (! (*insn_data
[icode
].operand
[2].predicate
)
3729 (op2
, insn_data
[icode
].operand
[2].mode
))
3730 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3732 if (! (*insn_data
[icode
].operand
[3].predicate
)
3733 (op3
, insn_data
[icode
].operand
[3].mode
))
3734 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3736 /* Everything should now be in the suitable form, so emit the compare insn
3737 and then the conditional move. */
3740 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
/* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
/* We can get const0_rtx or const_true_rtx in some circumstances.  Just
   return NULL and let the caller figure out how best to deal with this
   situation.  */
3746 if (GET_CODE (comparison
) != code
)
3749 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3751 /* If that failed, then give up. */
3757 if (subtarget
!= target
)
3758 convert_move (target
, subtarget
, 0);
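/* Illustrative sketch (not part of the original file): using
   emit_conditional_move for MAX (a, b), checking the NULL_RTX failure
   return as the interface above requires.  A and B are assumed to be
   pseudos of mode MODE.  */
#if 0
static rtx
example_max_via_cmov (rtx a, rtx b, enum machine_mode mode)
{
  rtx target = gen_reg_rtx (mode);
  rtx res = emit_conditional_move (target, GT, a, b, mode,
                                   a, b, mode, 0);
  if (res == NULL_RTX)
    /* No conditional move for MODE; the caller must fall back to a
       compare-and-branch sequence.  */
    return NULL_RTX;
  return res;
}
#endif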
/* Return non-zero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */
3772 can_conditionally_move_p (mode
)
3773 enum machine_mode mode
;
3775 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3781 #endif /* HAVE_conditional_move */
/* These functions generate an insn body and return it
   rather than emitting the insn.

   They do not protect from queued increments,
   because they may be used 1) in protect_from_queue itself
   and 2) in other passes where there is no queue.  */

/* Generate and return an insn body to add Y to X.  */
rtx
gen_add2_insn (x, y)
     rtx x, y;
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
3809 /* Generate and return an insn body to add r1 and c,
3810 storing the result in r0. */
3812 gen_add3_insn (r0
, r1
, c
)
3815 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3817 if (icode
== CODE_FOR_nothing
3818 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3819 (r0
, insn_data
[icode
].operand
[0].mode
))
3820 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3821 (r1
, insn_data
[icode
].operand
[1].mode
))
3822 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3823 (c
, insn_data
[icode
].operand
[2].mode
)))
3826 return (GEN_FCN (icode
) (r0
, r1
, c
));
3830 have_add2_insn (x
, y
)
3835 if (GET_MODE (x
) == VOIDmode
)
3838 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3840 if (icode
== CODE_FOR_nothing
)
3843 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3844 (x
, insn_data
[icode
].operand
[0].mode
))
3845 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3846 (x
, insn_data
[icode
].operand
[1].mode
))
3847 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3848 (y
, insn_data
[icode
].operand
[2].mode
)))
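/* Illustrative sketch (not part of the original file): emitting an in-place
   increment only when the target provides a suitable add pattern, using the
   predicates above.  REG is an assumed caller-supplied pseudo.  */
#if 0
static int
example_emit_increment (rtx reg)
{
  if (! have_add2_insn (reg, const1_rtx))
    return 0;                   /* Caller must use expand_binop instead.  */
  emit_insn (gen_add2_insn (reg, const1_rtx));
  return 1;
}
#endif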
3854 /* Generate and return an insn body to subtract Y from X. */
3857 gen_sub2_insn (x
, y
)
3860 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3862 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3863 (x
, insn_data
[icode
].operand
[0].mode
))
3864 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3865 (x
, insn_data
[icode
].operand
[1].mode
))
3866 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3867 (y
, insn_data
[icode
].operand
[2].mode
)))
3870 return (GEN_FCN (icode
) (x
, x
, y
));
3873 /* Generate and return an insn body to subtract r1 and c,
3874 storing the result in r0. */
3876 gen_sub3_insn (r0
, r1
, c
)
3879 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
3881 if (icode
== CODE_FOR_nothing
3882 || ! ((*insn_data
[icode
].operand
[0].predicate
)
3883 (r0
, insn_data
[icode
].operand
[0].mode
))
3884 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3885 (r1
, insn_data
[icode
].operand
[1].mode
))
3886 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3887 (c
, insn_data
[icode
].operand
[2].mode
)))
3890 return (GEN_FCN (icode
) (r0
, r1
, c
));
3894 have_sub2_insn (x
, y
)
3899 if (GET_MODE (x
) == VOIDmode
)
3902 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3904 if (icode
== CODE_FOR_nothing
)
3907 if (! ((*insn_data
[icode
].operand
[0].predicate
)
3908 (x
, insn_data
[icode
].operand
[0].mode
))
3909 || ! ((*insn_data
[icode
].operand
[1].predicate
)
3910 (x
, insn_data
[icode
].operand
[1].mode
))
3911 || ! ((*insn_data
[icode
].operand
[2].predicate
)
3912 (y
, insn_data
[icode
].operand
[2].mode
)))
/* Generate the body of an instruction to copy Y into X.
   It may be a SEQUENCE, if one insn isn't enough.  */
3922 gen_move_insn (x
, y
)
3925 enum machine_mode mode
= GET_MODE (x
);
3926 enum insn_code insn_code
;
3929 if (mode
== VOIDmode
)
3930 mode
= GET_MODE (y
);
3932 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3934 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3935 find a mode to do it in. If we have a movcc, use it. Otherwise,
3936 find the MODE_INT mode of the same width. */
3938 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3940 enum machine_mode tmode
= VOIDmode
;
3944 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3947 for (tmode
= QImode
; tmode
!= VOIDmode
;
3948 tmode
= GET_MODE_WIDER_MODE (tmode
))
3949 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3952 if (tmode
== VOIDmode
)
3955 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3956 may call change_address which is not appropriate if we were
3957 called when a reload was in progress. We don't have to worry
3958 about changing the address since the size in bytes is supposed to
3959 be the same. Copy the MEM to change the mode and move any
3960 substitutions from the old MEM to the new one. */
3962 if (reload_in_progress
)
3964 x
= gen_lowpart_common (tmode
, x1
);
3965 if (x
== 0 && GET_CODE (x1
) == MEM
)
3967 x
= adjust_address_nv (x1
, tmode
, 0);
3968 copy_replacements (x1
, x
);
3971 y
= gen_lowpart_common (tmode
, y1
);
3972 if (y
== 0 && GET_CODE (y1
) == MEM
)
3974 y
= adjust_address_nv (y1
, tmode
, 0);
3975 copy_replacements (y1
, y
);
3980 x
= gen_lowpart (tmode
, x
);
3981 y
= gen_lowpart (tmode
, y
);
3984 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3985 return (GEN_FCN (insn_code
) (x
, y
));
3989 emit_move_insn_1 (x
, y
);
3990 seq
= gen_sequence ();
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (to_mode, from_mode, unsignedp)
     enum machine_mode to_mode, from_mode;
     int unsignedp;
{
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (x, y, mto, mfrom, unsignedp)
     rtx x, y;
     enum machine_mode mto, mfrom;
     int unsignedp;
{
  return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));
}
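/* Illustrative sketch (not part of the original file): widening a QImode
   value into SImode only when extendtab records a pattern for it.  DEST_SI
   and SRC_QI are assumed caller-supplied operands of the right modes.  */
#if 0
static int
example_zero_extend_qi_si (rtx dest_si, rtx src_qi)
{
  if (can_extend_p (SImode, QImode, 1) == CODE_FOR_nothing)
    return 0;                   /* No zero-extend pattern; use convert_move.  */
  emit_insn (gen_extend_insn (dest_si, src_qi, SImode, QImode, 1));
  return 1;
}
#endif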
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr)
     enum machine_mode fltmode, fixmode;
     int unsignedp;
     int *truncp_ptr;
{
  *truncp_ptr = 0;
  if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
      != CODE_FOR_nothing)
    return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];

  if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];
    }

  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (fltmode, fixmode, unsignedp)
     enum machine_mode fixmode, fltmode;
     int unsignedp;
{
  return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */
4067 expand_float (to
, from
, unsignedp
)
4071 enum insn_code icode
;
4073 enum machine_mode fmode
, imode
;
4075 /* Crash now, because we won't be able to decide which mode to use. */
4076 if (GET_MODE (from
) == VOIDmode
)
4079 /* Look for an insn to do the conversion. Do it in the specified
4080 modes if possible; otherwise convert either input, output or both to
4081 wider mode. If the integer mode is wider than the mode of FROM,
4082 we can do the conversion signed even if the input is unsigned. */
4084 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4085 imode
= GET_MODE_WIDER_MODE (imode
))
4086 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4087 fmode
= GET_MODE_WIDER_MODE (fmode
))
4089 int doing_unsigned
= unsignedp
;
4091 if (fmode
!= GET_MODE (to
)
4092 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4095 icode
= can_float_p (fmode
, imode
, unsignedp
);
4096 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4097 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4099 if (icode
!= CODE_FOR_nothing
)
4101 to
= protect_from_queue (to
, 1);
4102 from
= protect_from_queue (from
, 0);
4104 if (imode
!= GET_MODE (from
))
4105 from
= convert_to_mode (imode
, from
, unsignedp
);
4107 if (fmode
!= GET_MODE (to
))
4108 target
= gen_reg_rtx (fmode
);
4110 emit_unop_insn (icode
, target
, from
,
4111 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4114 convert_move (to
, target
, 0);
4119 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4121 /* Unsigned integer, and no way to convert directly.
4122 Convert as signed, then conditionally adjust the result. */
4125 rtx label
= gen_label_rtx ();
4127 REAL_VALUE_TYPE offset
;
4131 to
= protect_from_queue (to
, 1);
4132 from
= protect_from_queue (from
, 0);
4135 from
= force_not_mem (from
);
4137 /* Look for a usable floating mode FMODE wider than the source and at
4138 least as wide as the target. Using FMODE will avoid rounding woes
4139 with unsigned values greater than the signed maximum value. */
4141 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4142 fmode
= GET_MODE_WIDER_MODE (fmode
))
4143 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4144 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4147 if (fmode
== VOIDmode
)
4149 /* There is no such mode. Pretend the target is wide enough. */
4150 fmode
= GET_MODE (to
);
4152 /* Avoid double-rounding when TO is narrower than FROM. */
4153 if ((significand_size (fmode
) + 1)
4154 < GET_MODE_BITSIZE (GET_MODE (from
)))
4157 rtx neglabel
= gen_label_rtx ();
4159 /* Don't use TARGET if it isn't a register, is a hard register,
4160 or is the wrong mode. */
4161 if (GET_CODE (target
) != REG
4162 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4163 || GET_MODE (target
) != fmode
)
4164 target
= gen_reg_rtx (fmode
);
4166 imode
= GET_MODE (from
);
4167 do_pending_stack_adjust ();
4169 /* Test whether the sign bit is set. */
4170 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4173 /* The sign bit is not set. Convert as signed. */
4174 expand_float (target
, from
, 0);
4175 emit_jump_insn (gen_jump (label
));
4178 /* The sign bit is set.
4179 Convert to a usable (positive signed) value by shifting right
4180 one bit, while remembering if a nonzero bit was shifted
4181 out; i.e., compute (from & 1) | (from >> 1). */
4183 emit_label (neglabel
);
4184 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4185 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4186 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4188 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4190 expand_float (target
, temp
, 0);
4192 /* Multiply by 2 to undo the shift above. */
4193 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4194 target
, 0, OPTAB_LIB_WIDEN
);
4196 emit_move_insn (target
, temp
);
4198 do_pending_stack_adjust ();
4204 /* If we are about to do some arithmetic to correct for an
4205 unsigned operand, do it in a pseudo-register. */
4207 if (GET_MODE (to
) != fmode
4208 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4209 target
= gen_reg_rtx (fmode
);
4211 /* Convert as signed integer to floating. */
4212 expand_float (target
, from
, 0);
4214 /* If FROM is negative (and therefore TO is negative),
4215 correct its value by 2**bitwidth. */
4217 do_pending_stack_adjust ();
4218 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
/* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
   Rather than setting up a dconst_dot_5, let's hope SCO
   fixes the problem.  */
4224 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
4225 temp
= expand_binop (fmode
, add_optab
, target
,
4226 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4227 target
, 0, OPTAB_LIB_WIDEN
);
4229 emit_move_insn (target
, temp
);
4231 do_pending_stack_adjust ();
4237 /* No hardware instruction available; call a library routine to convert from
4238 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4244 to
= protect_from_queue (to
, 1);
4245 from
= protect_from_queue (from
, 0);
4247 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4248 from
= convert_to_mode (SImode
, from
, unsignedp
);
4251 from
= force_not_mem (from
);
4253 if (GET_MODE (to
) == SFmode
)
4255 if (GET_MODE (from
) == SImode
)
4256 libfcn
= floatsisf_libfunc
;
4257 else if (GET_MODE (from
) == DImode
)
4258 libfcn
= floatdisf_libfunc
;
4259 else if (GET_MODE (from
) == TImode
)
4260 libfcn
= floattisf_libfunc
;
4264 else if (GET_MODE (to
) == DFmode
)
4266 if (GET_MODE (from
) == SImode
)
4267 libfcn
= floatsidf_libfunc
;
4268 else if (GET_MODE (from
) == DImode
)
4269 libfcn
= floatdidf_libfunc
;
4270 else if (GET_MODE (from
) == TImode
)
4271 libfcn
= floattidf_libfunc
;
4275 else if (GET_MODE (to
) == XFmode
)
4277 if (GET_MODE (from
) == SImode
)
4278 libfcn
= floatsixf_libfunc
;
4279 else if (GET_MODE (from
) == DImode
)
4280 libfcn
= floatdixf_libfunc
;
4281 else if (GET_MODE (from
) == TImode
)
4282 libfcn
= floattixf_libfunc
;
4286 else if (GET_MODE (to
) == TFmode
)
4288 if (GET_MODE (from
) == SImode
)
4289 libfcn
= floatsitf_libfunc
;
4290 else if (GET_MODE (from
) == DImode
)
4291 libfcn
= floatditf_libfunc
;
4292 else if (GET_MODE (from
) == TImode
)
4293 libfcn
= floattitf_libfunc
;
4302 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4303 GET_MODE (to
), 1, from
,
4305 insns
= get_insns ();
4308 emit_libcall_block (insns
, target
, value
,
4309 gen_rtx_FLOAT (GET_MODE (to
), from
));
4314 /* Copy result to requested destination
4315 if we have been computing in a temp location. */
4319 if (GET_MODE (target
) == GET_MODE (to
))
4320 emit_move_insn (to
, target
);
4322 convert_move (to
, target
, 0);
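/* Illustrative sketch (not part of the original file): converting an
   unsigned SImode pseudo to DFmode with expand_float, which picks a direct
   pattern, the signed-plus-fixup path, or a libcall as coded above.  */
#if 0
static rtx
example_float_unsigned_si (rtx from_si)
{
  rtx to = gen_reg_rtx (DFmode);
  expand_float (to, from_si, 1);   /* 1: treat FROM as unsigned.  */
  return to;
}
#endif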
/* expand_fix: generate code to convert FROM to fixed point
   and store in TO.  FROM must be floating point.  */

static rtx
ftruncify (x)
     rtx x;
{
  rtx temp = gen_reg_rtx (GET_MODE (x));
  return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
}
4338 expand_fix (to
, from
, unsignedp
)
4342 enum insn_code icode
;
4344 enum machine_mode fmode
, imode
;
4348 /* We first try to find a pair of modes, one real and one integer, at
4349 least as wide as FROM and TO, respectively, in which we can open-code
4350 this conversion. If the integer mode is wider than the mode of TO,
4351 we can do the conversion either signed or unsigned. */
4353 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4354 fmode
= GET_MODE_WIDER_MODE (fmode
))
4355 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4356 imode
= GET_MODE_WIDER_MODE (imode
))
4358 int doing_unsigned
= unsignedp
;
4360 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4361 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4362 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4364 if (icode
!= CODE_FOR_nothing
)
4366 to
= protect_from_queue (to
, 1);
4367 from
= protect_from_queue (from
, 0);
4369 if (fmode
!= GET_MODE (from
))
4370 from
= convert_to_mode (fmode
, from
, 0);
4373 from
= ftruncify (from
);
4375 if (imode
!= GET_MODE (to
))
4376 target
= gen_reg_rtx (imode
);
4378 emit_unop_insn (icode
, target
, from
,
4379 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4381 convert_move (to
, target
, unsignedp
);
4386 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* For an unsigned conversion, there is one more way to do it.
   If we have a signed conversion, we generate code that compares
   the real value to the largest representable positive number.  If it
   is smaller, the conversion is done normally.  Otherwise, subtract
   one plus the highest signed number, convert, and add it back.

   We only need to check all real modes, since we know we didn't find
   anything with a wider integer mode.  */
  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      /* Make sure we won't lose significant bits doing this.  */
      if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
          && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                            &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          to = protect_from_queue (to, 1);
          from = protect_from_queue (from, 0);

          if (flag_force_mem)
            from = force_not_mem (from);

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 GEN_INT (trunc_int_for_mode
                                          ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                           GET_MODE (to))),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }
#endif
  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */
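  /* For example (illustrative): fixing an SFmode value into a QImode
     destination first recurses with an SImode temporary (using __fixsfsi
     or __fixunssfsi) and then lets convert_move narrow the SImode result
     into the QImode destination at the end of this function.  */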
  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else if (GET_MODE (from) == SFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
    }
  else if (GET_MODE (from) == DFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
    }
  else if (GET_MODE (from) == XFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
    }
  else if (GET_MODE (from) == TFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
    }
  to = protect_from_queue (to, 1);
  from = protect_from_queue (from, 0);

  if (flag_force_mem)
    from = force_not_mem (from);

  value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
                                   GET_MODE (to), 1, from,
                                   GET_MODE (from));
  insns = get_insns ();

  emit_libcall_block (insns, target, value,
                      gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                     GET_MODE (to), from));

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */
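/* Illustrative use, not from the original sources: a caller can test
   e.g. have_insn_for (PLUS, SImode) to learn whether an addsi3 pattern
   exists before deciding to open-code an operation.  */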
int
have_insn_for (code, mode)
     enum rtx_code code;
     enum machine_mode mode;
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
/* Create a blank optab.  */

static optab
new_optab ()
{
  int i;
  optab op = (optab) xmalloc (sizeof (struct optab));

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lower case) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */
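/* For example (illustrative): init_libfuncs (add_optab, SImode, SImode,
   "add", '3') registers the name "__addsi3" as the SImode libfunc entry
   of add_optab.  */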
static void
init_libfuncs (optable, first_mode, last_mode, opname, suffix)
     optab optable;
     int first_mode;
     int last_mode;
     const char *opname;
     int suffix;
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (libfunc_name,
                                                       p - libfunc_name));
    }
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  init_libfuncs (optable, SImode, TImode, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  init_libfuncs (optable, SFmode, TFmode, opname, suffix);
}
rtx
init_one_libfunc (name)
     const char *name;
{
  /* Create a FUNCTION_DECL that can be passed to ENCODE_SECTION_INFO.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Return the symbol_ref from the mem rtx.  */
  return XEXP (DECL_RTL (decl), 0);
}
/* Mark ARG (which is really an OPTAB *) for GC.  */

static void
mark_optab (arg)
     void *arg;
{
  optab o = *(optab *) arg;
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; ++i)
    ggc_mark_rtx (o->handlers[i].libfunc);
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs ()
{
  unsigned int i, j, k;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < ARRAY_SIZE (fixtab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
        fixtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
        fixtrunctab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (floattab); i++)
    for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
        floattab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (extendtab); i++)
    for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
        extendtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  sqrt_optab = init_optab (SQRT);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
  /* This flag says the same insns that convert to a signed fixnum
     also convert validly to an unsigned one.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
#endif
  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');
#ifdef MULSI3_LIBCALL
  smul_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MULSI3_LIBCALL);
#endif
#ifdef MULDI3_LIBCALL
  smul_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MULDI3_LIBCALL);
#endif

#ifdef DIVSI3_LIBCALL
  sdiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (DIVSI3_LIBCALL);
#endif
#ifdef DIVDI3_LIBCALL
  sdiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (DIVDI3_LIBCALL);
#endif

#ifdef UDIVSI3_LIBCALL
  udiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UDIVSI3_LIBCALL);
#endif
#ifdef UDIVDI3_LIBCALL
  udiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UDIVDI3_LIBCALL);
#endif

#ifdef MODSI3_LIBCALL
  smod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MODSI3_LIBCALL);
#endif
#ifdef MODDI3_LIBCALL
  smod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MODDI3_LIBCALL);
#endif

#ifdef UMODSI3_LIBCALL
  umod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UMODSI3_LIBCALL);
#endif
#ifdef UMODDI3_LIBCALL
  umod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UMODDI3_LIBCALL);
#endif
  /* Use cabs for DC complex abs, since systems generally have cabs.
     Don't define any libcall for SCmode, so that cabs will be used.  */
  abs_optab->handlers[(int) DCmode].libfunc
    = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");
  extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
  extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
  extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
  extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
  extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");

  truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
  truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
  trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
  truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
  trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");

  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
                                            ? "_Unwind_SjLj_Resume"
                                            : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");
  eqhf2_libfunc = init_one_libfunc ("__eqhf2");
  nehf2_libfunc = init_one_libfunc ("__nehf2");
  gthf2_libfunc = init_one_libfunc ("__gthf2");
  gehf2_libfunc = init_one_libfunc ("__gehf2");
  lthf2_libfunc = init_one_libfunc ("__lthf2");
  lehf2_libfunc = init_one_libfunc ("__lehf2");
  unordhf2_libfunc = init_one_libfunc ("__unordhf2");

  eqsf2_libfunc = init_one_libfunc ("__eqsf2");
  nesf2_libfunc = init_one_libfunc ("__nesf2");
  gtsf2_libfunc = init_one_libfunc ("__gtsf2");
  gesf2_libfunc = init_one_libfunc ("__gesf2");
  ltsf2_libfunc = init_one_libfunc ("__ltsf2");
  lesf2_libfunc = init_one_libfunc ("__lesf2");
  unordsf2_libfunc = init_one_libfunc ("__unordsf2");

  eqdf2_libfunc = init_one_libfunc ("__eqdf2");
  nedf2_libfunc = init_one_libfunc ("__nedf2");
  gtdf2_libfunc = init_one_libfunc ("__gtdf2");
  gedf2_libfunc = init_one_libfunc ("__gedf2");
  ltdf2_libfunc = init_one_libfunc ("__ltdf2");
  ledf2_libfunc = init_one_libfunc ("__ledf2");
  unorddf2_libfunc = init_one_libfunc ("__unorddf2");

  eqxf2_libfunc = init_one_libfunc ("__eqxf2");
  nexf2_libfunc = init_one_libfunc ("__nexf2");
  gtxf2_libfunc = init_one_libfunc ("__gtxf2");
  gexf2_libfunc = init_one_libfunc ("__gexf2");
  ltxf2_libfunc = init_one_libfunc ("__ltxf2");
  lexf2_libfunc = init_one_libfunc ("__lexf2");
  unordxf2_libfunc = init_one_libfunc ("__unordxf2");

  eqtf2_libfunc = init_one_libfunc ("__eqtf2");
  netf2_libfunc = init_one_libfunc ("__netf2");
  gttf2_libfunc = init_one_libfunc ("__gttf2");
  getf2_libfunc = init_one_libfunc ("__getf2");
  lttf2_libfunc = init_one_libfunc ("__lttf2");
  letf2_libfunc = init_one_libfunc ("__letf2");
  unordtf2_libfunc = init_one_libfunc ("__unordtf2");
  floatsisf_libfunc = init_one_libfunc ("__floatsisf");
  floatdisf_libfunc = init_one_libfunc ("__floatdisf");
  floattisf_libfunc = init_one_libfunc ("__floattisf");

  floatsidf_libfunc = init_one_libfunc ("__floatsidf");
  floatdidf_libfunc = init_one_libfunc ("__floatdidf");
  floattidf_libfunc = init_one_libfunc ("__floattidf");

  floatsixf_libfunc = init_one_libfunc ("__floatsixf");
  floatdixf_libfunc = init_one_libfunc ("__floatdixf");
  floattixf_libfunc = init_one_libfunc ("__floattixf");

  floatsitf_libfunc = init_one_libfunc ("__floatsitf");
  floatditf_libfunc = init_one_libfunc ("__floatditf");
  floattitf_libfunc = init_one_libfunc ("__floattitf");

  fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
  fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
  fixsfti_libfunc = init_one_libfunc ("__fixsfti");

  fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
  fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
  fixdfti_libfunc = init_one_libfunc ("__fixdfti");

  fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
  fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
  fixxfti_libfunc = init_one_libfunc ("__fixxfti");

  fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
  fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
  fixtfti_libfunc = init_one_libfunc ("__fixtfti");

  fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
  fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
  fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");

  fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
  fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
  fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");

  fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
  fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
  fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");

  fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
  fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
  fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");
  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

#ifdef HAVE_conditional_trap
  init_traps ();
#endif

#ifdef INIT_TARGET_OPTABS
  /* Allow the target to add more libcalls or rename some, etc.  */
  INIT_TARGET_OPTABS;
#endif

  /* Add these GC roots.  */
  ggc_add_root (optab_table, OTI_MAX, sizeof (optab), mark_optab);
  ggc_add_rtx_root (libfunc_table, LTI_MAX);
}
#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are
   ignored.  */
static rtx trap_rtx;

static void
init_traps ()
{
  if (HAVE_conditional_trap)
    {
      trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
      ggc_add_rtx_root (&trap_rtx, 1);
    }
}
#endif
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */
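/* Illustrative use, not from the original sources: assuming `index' and
   `bound' are rtx operands already in hand, a bounds check might do

       rtx seq = gen_cond_trap (GTU, index, bound, GEN_INT (0));
       if (seq)
         emit_insn (seq);

   and fall back to an explicit compare-and-branch when 0 is returned.  */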
rtx
gen_cond_trap (code, op1, op2, tcode)
     enum rtx_code code ATTRIBUTE_UNUSED;
     rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED;
{
  enum machine_mode mode = GET_MODE (op1);

  if (mode == VOIDmode)
    return 0;

#ifdef HAVE_conditional_trap
  if (HAVE_conditional_trap
      && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx insn;

      start_sequence ();
      emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code) (op1, op2));
      PUT_CODE (trap_rtx, code);
      insn = gen_conditional_trap (trap_rtx, tcode);
      if (insn)
        {
          emit_insn (insn);
          insn = gen_sequence ();