/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];
/* Tables of patterns for extending one integer mode to another.  */
enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];

/* Tables of patterns for converting between fixed and floating point.  */
enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx,
                                     enum machine_mode, int,
                                     enum optab_methods, enum mode_class,
                                     optab);
static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx,
                                 enum machine_mode, int, enum optab_methods,
                                 enum mode_class, optab);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static rtx ftruncify (rtx);
static optab new_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
                                enum optab_methods);
static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)

  rtx last_insn, insn, set;

      || NEXT_INSN (insns) == NULL_RTX)

  if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
      && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')

  if (GET_CODE (target) == ZERO_EXTRACT)

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))

  set = single_set (last_insn);

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))

      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)

          if (reg_set_p (target, insn))

          insn = PREV_INSN (insn);

  if (GET_RTX_CLASS (code) == '1')
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));

    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
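/* Added illustrative note (not part of the original source): when a QImode
   AND is expanded in SImode, the QImode operands can be widened with
   NO_EXTEND nonzero, because only the low 8 bits of the SImode result
   survive the truncation back to QImode.  A right shift, by contrast, needs
   the real high-order bits, so it must genuinely zero- or sign-extend.  */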
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
/* Generate code to perform a straightforward complex divide.  */

expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1,
                          rtx realr, rtx imagr, enum machine_mode submode,
                          int unsignedp, enum optab_methods methods,
                          enum mode_class class, optab binoptab)
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)

      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);

  imag0 = force_reg (submode, imag0);

  imag1 = force_reg (submode, imag1);

  /* Divisor: c*c + d*d.  */
  temp1 = expand_binop (submode, this_mul_optab, real1, real1,
                        NULL_RTX, unsignedp, methods);

  temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
                        NULL_RTX, unsignedp, methods);

  if (temp1 == 0 || temp2 == 0)

  divisor = expand_binop (submode, this_add_optab, temp1, temp2,
                          NULL_RTX, unsignedp, methods);

      /* Mathematically, ((a)(c-id))/divisor.  */
      /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)).  */
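      /* Added illustrative note (not part of the original source): the
         identity used throughout is (a+ib)/(c+id) = ((ac+bd) + i(bc-ad))
         / (cc+dd), obtained by multiplying numerator and denominator by the
         conjugate (c-id).  For example, (1+2i)/(3+4i) = ((3+8) + i(6-4))/25
         = (11+2i)/25.  */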
      /* Calculate the dividend.  */
      real_t = expand_binop (submode, this_mul_optab, real0, real1,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
                            NULL_RTX, unsignedp);

      /* Mathematically, ((a+ib)(c-id))/divider.  */
      /* Calculate the dividend.  */
      temp1 = expand_binop (submode, this_mul_optab, real0, real1,
                            NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      real_t = expand_binop (submode, this_add_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
                            NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

  if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);

    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);

    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

    emit_move_insn (imagr, res);
/* Generate code to perform a wide-input-range-acceptable complex divide.  */

expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr,
                      rtx imagr, enum machine_mode submode, int unsignedp,
                      enum optab_methods methods, enum mode_class class,
                      optab binoptab)

  rtx temp1, temp2, lab1, lab2;
  enum machine_mode mode;

  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)

      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);

  imag0 = force_reg (submode, imag0);

  imag1 = force_reg (submode, imag1);

  /* XXX What's an "unsigned" complex number?  */

  temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
  temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);

  if (temp1 == 0 || temp2 == 0)

  mode = GET_MODE (temp1);
  lab1 = gen_label_rtx ();
  emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
                           mode, unsignedp, lab1);

  /* |c| >= |d|; use ratio d/c to scale dividend and divisor.  */
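  /* Added illustrative note (not part of the original source): this is the
     scaled form of complex division (Smith's method).  With r = d/c and
     |c| >= |d|, (a+ib)/(c+id) = ((a+br) + i(b-ar)) / (c+dr), which avoids
     the overflow that computing cc+dd in the straightforward method can
     cause when c or d is large.  */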
      if (class == MODE_COMPLEX_FLOAT)
        ratio = expand_binop (submode, binoptab, imag1, real1,
                              NULL_RTX, unsignedp, methods);

        ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                               imag1, real1, NULL_RTX, unsignedp);

      /* Calculate divisor.  */

      temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
                            NULL_RTX, unsignedp, methods);

      divisor = expand_binop (submode, this_add_optab, temp1, real1,
                              NULL_RTX, unsignedp, methods);

      /* Calculate dividend.  */

          /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)).  */

          imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
                                 NULL_RTX, unsignedp, methods);

          imag_t = expand_unop (submode, this_neg_optab, imag_t,
                                NULL_RTX, unsignedp);

          if (real_t == 0 || imag_t == 0)

          /* Compute (a+ib)/(c+id) as
             (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)).  */

          temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                                NULL_RTX, unsignedp, methods);

          real_t = expand_binop (submode, this_add_optab, temp1, real0,
                                 NULL_RTX, unsignedp, methods);

          temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                                NULL_RTX, unsignedp, methods);

          imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
                                 NULL_RTX, unsignedp, methods);

          if (real_t == 0 || imag_t == 0)

      if (class == MODE_COMPLEX_FLOAT)
        res = expand_binop (submode, binoptab, real_t, divisor,
                            realr, unsignedp, methods);

        res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                             real_t, divisor, realr, unsignedp);

        emit_move_insn (realr, res);

      if (class == MODE_COMPLEX_FLOAT)
        res = expand_binop (submode, binoptab, imag_t, divisor,
                            imagr, unsignedp, methods);

        res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                             imag_t, divisor, imagr, unsignedp);

        emit_move_insn (imagr, res);

      lab2 = gen_label_rtx ();
      emit_jump_insn (gen_jump (lab2));

      /* |d| > |c|; use ratio c/d to scale dividend and divisor.  */

      if (class == MODE_COMPLEX_FLOAT)
        ratio = expand_binop (submode, binoptab, real1, imag1,
                              NULL_RTX, unsignedp, methods);

        ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                               real1, imag1, NULL_RTX, unsignedp);

      /* Calculate divisor.  */

      temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
                            NULL_RTX, unsignedp, methods);

      divisor = expand_binop (submode, this_add_optab, temp1, imag1,
                              NULL_RTX, unsignedp, methods);

      /* Calculate dividend.  */

          /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d).  */

          real_t = expand_binop (submode, this_mul_optab, real0, ratio,
                                 NULL_RTX, unsignedp, methods);

          imag_t = expand_unop (submode, this_neg_optab, real0,
                                NULL_RTX, unsignedp);

          if (real_t == 0 || imag_t == 0)

          /* Compute (a+ib)/(c+id) as
             (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d).  */

          temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                                NULL_RTX, unsignedp, methods);

          real_t = expand_binop (submode, this_add_optab, temp1, imag0,
                                 NULL_RTX, unsignedp, methods);

          temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                                NULL_RTX, unsignedp, methods);

          imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
                                 NULL_RTX, unsignedp, methods);

          if (real_t == 0 || imag_t == 0)

      if (class == MODE_COMPLEX_FLOAT)
        res = expand_binop (submode, binoptab, real_t, divisor,
                            realr, unsignedp, methods);

        res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                             real_t, divisor, realr, unsignedp);

        emit_move_insn (realr, res);

      if (class == MODE_COMPLEX_FLOAT)
        res = expand_binop (submode, binoptab, imag_t, divisor,
                            imagr, unsignedp, methods);

        res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                             imag_t, divisor, imagr, unsignedp);

        emit_move_insn (imagr, res);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)

  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
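/* Added usage sketch (illustrative only, not part of the original file): a
   caller expanding a plain 32-bit addition might write

     temp = expand_binop (SImode, add_optab, op0, op1, target,
                          0, OPTAB_LIB_WIDEN);

   letting this routine pick a direct insn, a widened operation, or a
   libcall, in that order.  expand_simple_binop above is the same entry
   point keyed by an rtx code (PLUS) instead of an optab pointer.  */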
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)

  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;

  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

  target = protect_from_queue (target, 1);

  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))

      op0 = force_not_mem (op0);

      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)

      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == 'c'
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)

      if (((target == 0 || GET_CODE (target) == REG)
           ? ((GET_CODE (op1) == REG
               && GET_CODE (op0) != REG)

           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;

      rtx xop0 = op0, xop1 = op1;

      temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */

      if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
          && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)

          tmp = op0; op0 = op1; op1 = tmp;
          tmp = xop0; xop0 = xop1; xop1 = tmp;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudos.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);

      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call ourselves again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))

          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,

      delete_insns_since (last);

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))

      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);

            return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))

        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))

            rtx xop0 = op0, xop1 = op1;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);

                if (class != MODE_INT)

                    target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);

                  return gen_lowpart (mode, temp);

              delete_insns_since (last);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)

      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)

          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)

          if (binoptab->code != UNKNOWN)
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)

      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      rtx insns, inter, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = binoptab == ashl_optab;
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count >= BITS_PER_WORD)

          inter = expand_binop (word_mode, binoptab,
                                GEN_INT (shift_count - BITS_PER_WORD),
                                into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          /* For a signed right shift, we must fill the word we are shifting
             out of with copies of the sign bit.  Otherwise it is zeroed.  */
          if (inter != 0 && binoptab != ashr_optab)
            inter = CONST0_RTX (word_mode);
          else if (inter != 0)
            inter = expand_binop (word_mode, binoptab,
                                  GEN_INT (BITS_PER_WORD - 1),
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

          optab reverse_unsigned_shift, unsigned_shift;

          /* For a shift of less than BITS_PER_WORD, to compute the carry,
             we must do a logical shift in the opposite direction of the
             desired shift.  */

          reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);

          /* For a shift of less than BITS_PER_WORD, to compute the word
             shifted towards, we need to unsigned shift the orig value of
             the word towards.  */

          unsigned_shift = (left_shift ? ashl_optab : lshr_optab);

          carries = expand_binop (word_mode, reverse_unsigned_shift,
                                  GEN_INT (BITS_PER_WORD - shift_count),
                                  0, unsignedp, next_methods);

          inter = expand_binop (word_mode, unsigned_shift, into_input,
                                op1, 0, unsignedp, next_methods);

          inter = expand_binop (word_mode, ior_optab, carries, inter,
                                into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          inter = expand_binop (word_mode, binoptab, outof_input,
                                op1, outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;

      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */
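      /* Added illustrative note (not part of the original source): for a
         double-word rotate by SHIFT_COUNT below BITS_PER_WORD, each result
         word is built as the inclusive OR of one input word shifted by
         SHIFT_COUNT and the other input word shifted the opposite way by
         BITS_PER_WORD - SHIFT_COUNT; e.g. a 64-bit rotate-left by 10 on a
         32-bit target combines (w << 10) with (other_w >> 22).  */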
      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)

          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);

          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)

              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);

              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      /* We can't make this a no conflict block if this is a word swap,
         because the word swap case fails if the input and output values
         are in the same register.  */
      if (shift_count != BITS_PER_WORD)
        emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* These can be done a word at a time by propagating carries.  */
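  /* Added illustrative note (not part of the original source): e.g. a 64-bit
     addition on a 32-bit target is expanded below as an add of the low
     words, an emit_store_flag_force to recover the carry (an unsigned
     "sum < operand" test detects that the low-word add wrapped), and an add
     of the high words plus that carry.  */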
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || GET_CODE (target) != REG)

      /* Indicate for flow that the entire target reg is being set.  */
      if (GET_CODE (target) == REG)
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)

          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab

                                                 word_mode, 1, normalizep);

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,

                                   NULL_RTX, 1, next_methods);

                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab

                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);

              emit_move_insn (target_piece, newx);

          carry_in = carry_out;

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))

              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);

  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]

    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 1.  */
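  /* Added illustrative note (not part of the original source): with 8-bit
     words, op0_low = 0xFF is treated as -1 by a signed widening multiply,
     so 0xFF * 0x02 yields -2 instead of the unsigned 0x01FE; the product is
     short by op1_low * 2**BITS_PER_WORD = 2*256 = 512.  The trick below adds
     the sign bit of op0_low (here 1) into op0_high, so the op0_high*op1_low
     cross term supplies exactly that missing 512.  */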
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))

      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);

      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && GET_CODE (target) != REG))

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */

            delete_insns_since (last);

            op0_xhigh = op0_high, op1_xhigh = op1_high;

               && smul_widen_optab->handlers[(int) mode].insn_code
                  != CODE_FOR_nothing)

          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);

              op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                          op0_xhigh, op0_xhigh, 0,

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);

              op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                          op1_xhigh, op1_xhigh, 0,

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)

          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

          if (!REG_P (product_high))
            product_high = force_reg (word_mode, product_high);

            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          emit_move_insn (operand_subword (product, high, 1, mode), product_high);

              if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

                  temp = emit_move_insn (product, product);
                  set_unique_reg_note (temp,
                                       gen_rtx_fmt_ee (MULT, mode,

      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         trying to do it.  */

      delete_insns_since (last);
  /* Open-code the vector operations if we have no hardware support
     for them.  */
  if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
    return expand_vector_binop (mode, binoptab, op0, op1, target,
                                unsignedp, methods);

  /* We need to open-code the complex type operations: '+, -, * and /' */

  /* At this point we allow operations between two similar complex
     numbers, and also if one of the operands is not a complex number
     but rather of MODE_FLOAT or MODE_INT.  However, the caller
     must make sure that the MODE of the non-complex operand matches
     the SUBMODE of the complex operand.  */

  if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)

      rtx real0 = 0, imag0 = 0;
      rtx real1 = 0, imag1 = 0;
      rtx realr, imagr, res;

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode = GET_MODE_INNER (mode);

      if (submode == BLKmode)

        target = gen_reg_rtx (mode);

      realr = gen_realpart (submode, target);
      imagr = gen_imagpart (submode, target);

      if (GET_MODE (op0) == mode)

          real0 = gen_realpart (submode, op0);
          imag0 = gen_imagpart (submode, op0);

      if (GET_MODE (op1) == mode)

          real1 = gen_realpart (submode, op1);
          imag1 = gen_imagpart (submode, op1);

      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))

      switch (binoptab->code)

          /* (a+ib) + (c+id) = (a+c) + i(b+d) */

          /* (a+ib) - (c+id) = (a-c) + i(b-d) */
          res = expand_binop (submode, binoptab, real0, real1,
                              realr, unsignedp, methods);

          else if (res != realr)
            emit_move_insn (realr, res);

          if (imag0 != 0 && imag1 != 0)
            res = expand_binop (submode, binoptab, imag0, imag1,
                                imagr, unsignedp, methods);
          else if (imag0 != 0)

          else if (binoptab->code == MINUS)
            res = expand_unop (submode,
                               binoptab == subv_optab ? negv_optab : neg_optab,
                               imag1, imagr, unsignedp);

          else if (res != imagr)
            emit_move_insn (imagr, res);

          /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */

          if (imag0 != 0 && imag1 != 0)

              /* Don't fetch these from memory more than once.  */
              real0 = force_reg (submode, real0);
              real1 = force_reg (submode, real1);
              imag0 = force_reg (submode, imag0);
              imag1 = force_reg (submode, imag1);

              temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
                                    unsignedp, methods);

              temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
                                    unsignedp, methods);

              if (temp1 == 0 || temp2 == 0)

                                    binoptab == smulv_optab ? subv_optab : sub_optab,
                                    temp1, temp2, realr, unsignedp, methods));

              else if (res != realr)
                emit_move_insn (realr, res);

              temp1 = expand_binop (submode, binoptab, real0, imag1,
                                    NULL_RTX, unsignedp, methods);

              /* Avoid expanding redundant multiplication for the common
                 case of squaring a complex number.  */
              if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1))

                temp2 = expand_binop (submode, binoptab, real1, imag0,
                                      NULL_RTX, unsignedp, methods);

              if (temp1 == 0 || temp2 == 0)

                                    binoptab == smulv_optab ? addv_optab : add_optab,
                                    temp1, temp2, imagr, unsignedp, methods));

              else if (res != imagr)
                emit_move_insn (imagr, res);

              /* Don't fetch these from memory more than once.  */
              real0 = force_reg (submode, real0);
              real1 = force_reg (submode, real1);

              res = expand_binop (submode, binoptab, real0, real1,
                                  realr, unsignedp, methods);

              else if (res != realr)
                emit_move_insn (realr, res);

                res = expand_binop (submode, binoptab,
                                    real1, imag0, imagr, unsignedp, methods);

                res = expand_binop (submode, binoptab,
                                    real0, imag1, imagr, unsignedp, methods);

              else if (res != imagr)
                emit_move_insn (imagr, res);

          /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */

              /* (a+ib) / (c+i0) = (a/c) + i(b/c) */

              /* Don't fetch these from memory more than once.  */
              real1 = force_reg (submode, real1);

              /* Simply divide the real and imaginary parts by `c' */
              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, real0, real1,
                                    realr, unsignedp, methods);

                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     real0, real1, realr, unsignedp);

              else if (res != realr)
                emit_move_insn (realr, res);

              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, imag0, real1,
                                    imagr, unsignedp, methods);

                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     imag0, real1, imagr, unsignedp);

              else if (res != imagr)
                emit_move_insn (imagr, res);

              switch (flag_complex_divide_method)

                  ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
                                                 realr, imagr, submode,

                  ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
                                             realr, imagr, submode,

      if (binoptab->code != UNKNOWN)
          = gen_rtx_fmt_ee (binoptab->code, mode,
                            copy_rtx (op0), copy_rtx (op1));

      emit_no_conflict_block (seq, target, op0, op1, equiv_value);
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))

      enum machine_mode op1_mode = mode;

          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))

      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))

          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))

              rtx xop0 = op0, xop1 = op1;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);

                  if (class != MODE_INT)

                      target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);

                    return gen_lowpart (mode, temp);

                delete_insns_since (last);

  delete_insns_since (entry_last);
/* Like expand_binop, but for open-coding vector binops.  */

expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)

  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;
  rtx t, a, b, res, seq;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
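  /* Added illustrative note (not part of the original source): e.g. an
     addition in V4SImode with no usable V4SImode or V2SImode add pattern
     falls through to the element loop below, which pulls out each SImode
     element, adds the pairs with expand_binop, and stores the results back
     with store_bit_field (or a move into a subreg).  */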
  for (tmode = GET_CLASS_NARROWEST_MODE (class);
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))

      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
          && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

  switch (binoptab->code)

      tmode = int_mode_for_mode (mode);
      if (tmode != BLKmode)

  subsize = GET_MODE_SIZE (submode);
  subbitsize = GET_MODE_BITSIZE (submode);
  elts = size / subsize;

  /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
     but that we operate on more than one element at a time.  */
  if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)

  /* Errors can leave us with a const0_rtx as operand.  */
  if (GET_MODE (op0) != mode)
    op0 = copy_to_mode_reg (mode, op0);
  if (GET_MODE (op1) != mode)
    op1 = copy_to_mode_reg (mode, op1);

    target = gen_reg_rtx (mode);

  for (i = 0; i < elts; ++i)

      /* If this is part of a register, and not the first item in the
         word, we can't store using a SUBREG - that would clobber
         previous results.
         And storing with a SUBREG is only possible for the least
         significant part, hence we can't do it for big endian
         (unless we want to permute the evaluation order.  */
      if (GET_CODE (target) == REG
          && (BYTES_BIG_ENDIAN
              ? subsize < UNITS_PER_WORD
              : ((i * subsize) % UNITS_PER_WORD) != 0))

      t = simplify_gen_subreg (submode, target, mode, i * subsize);
      if (CONSTANT_P (op0))
        a = simplify_gen_subreg (submode, op0, mode, i * subsize);

        a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
                               NULL_RTX, submode, submode, size);
      if (CONSTANT_P (op1))
        b = simplify_gen_subreg (submode, op1, mode, i * subsize);

        b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
                               NULL_RTX, submode, submode, size);

      if (binoptab->code == DIV)

          if (class == MODE_VECTOR_FLOAT)
            res = expand_binop (submode, binoptab, a, b, t,
                                unsignedp, methods);

            res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                 a, b, t, unsignedp);

        res = expand_binop (submode, binoptab, a, b, t,
                            unsignedp, methods);

          emit_move_insn (t, res);

        store_bit_field (target, subbitsize, i * subbitsize, submode, res,
/* Like expand_unop but for open-coding vector unops.  */

expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0,
                    rtx target, int unsignedp)

  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
  for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))

      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
          && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

  /* If there is no negate operation, try doing a subtract from zero.  */
  if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
      /* Avoid infinite recursion when an
         error has left us with the wrong mode.  */
      && GET_MODE (op0) == mode)

      temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_DIRECT);

  if (unoptab == one_cmpl_optab)

      tmode = int_mode_for_mode (mode);
      if (tmode != BLKmode)

  subsize = GET_MODE_SIZE (submode);
  subbitsize = GET_MODE_BITSIZE (submode);
  elts = size / subsize;

  /* Errors can leave us with a const0_rtx as operand.  */
  if (GET_MODE (op0) != mode)
    op0 = copy_to_mode_reg (mode, op0);

    target = gen_reg_rtx (mode);

  for (i = 0; i < elts; ++i)

      /* If this is part of a register, and not the first item in the
         word, we can't store using a SUBREG - that would clobber
         previous results.
         And storing with a SUBREG is only possible for the least
         significant part, hence we can't do it for big endian
         (unless we want to permute the evaluation order.  */
      if (GET_CODE (target) == REG
          && (BYTES_BIG_ENDIAN
              ? subsize < UNITS_PER_WORD
              : ((i * subsize) % UNITS_PER_WORD) != 0))

      t = simplify_gen_subreg (submode, target, mode, i * subsize);
      if (CONSTANT_P (op0))
        a = simplify_gen_subreg (submode, op0, mode, i * subsize);

        a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
                               t, submode, submode, size);

      res = expand_unop (submode, unoptab, a, t, unsignedp);

          emit_move_insn (t, res);

        store_bit_field (target, subbitsize, i * subbitsize, submode, res,
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;
  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);
  return 0;
}
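/* Illustrative sketch (not part of the original file): expanding a division
   where the signedness selects between udiv_optab and sdiv_optab.  Real
   callers normally arrive here through the divmod expansion machinery; this
   only shows the calling convention, with op0/op1/unsignedp assumed to be
   set up by the caller.  */
#if 0
{
  rtx quotient = sign_expand_binop (SImode, udiv_optab, sdiv_optab,
				    op0, op1, NULL_RTX, unsignedp,
				    OPTAB_LIB_WIDEN);
}
#endif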
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (targ0)
    targ0 = protect_from_queue (targ0, 1);
  else
    targ0 = gen_reg_rtx (mode);
  if (targ1)
    targ1 = protect_from_queue (targ1, 1);
  else
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
	  || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
	abort ();

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
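/* Illustrative sketch (not part of the original file): requesting both
   results of a two-output pattern, for example a divmod insn that produces
   the quotient and the remainder at once.  op0/op1 are assumed to be SImode
   operands prepared by the caller.  */
#if 0
{
  rtx quot = gen_reg_rtx (SImode);
  rtx rem = gen_reg_rtx (SImode);

  if (! expand_twoval_binop (sdivmod_optab, op0, op1, quot, rem, 0))
    {
      /* No such pattern in any mode; fall back to expanding the
	 quotient and remainder separately.  */
    }
}
#endif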
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  if (unop == 0)
    abort ();

  return expand_unop (mode, unop, op0, target, unsignedp);
}
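/* Illustrative sketch (not part of the original file): the rtx-code wrapper
   saves the caller a code_to_optab lookup.  op0 is assumed to be an SImode
   value prepared by the caller.  */
#if 0
{
  rtx negated = expand_simple_unop (SImode, NEG, op0, NULL_RTX, 0);
  rtx inverted = expand_simple_unop (SImode, NOT, op0, NULL_RTX, 0);
}
#endif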
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
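/* Worked example of the identity used above (not part of the original file):
   for x = 0x13 in QImode, zero-extending to SImode gives 0x00000013, whose
   SImode clz is 27; subtracting the width difference 32 - 8 = 24 yields 3,
   which is indeed the QImode clz of 0x13.  */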
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, GEN_INT (1),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
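/* Worked example (not part of the original file): for x = 0xb5 (10110101),
   popcount is 5 and (5 & 1) = 1, the parity of the operand.  The widening
   variant is safe because zero-extension adds no extra set bits, so the
   popcount, and hence the parity, is unchanged.  */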
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx last = get_last_insn ();
  rtx pat;

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);

  if (flag_force_mem)
    op0 = force_not_mem (op0);

  if (target)
    target = protect_from_queue (target, 1);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return temp;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
	return temp;
      else
	goto try_libcall;
    }

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && class == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (class != MODE_INT)
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
      return target;
    }

  /* Open-code the complex negation operation.  */
  else if (unoptab->code == NEG
	   && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
    {
      rtx target_piece;
      rtx x;
      rtx seq;

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode = GET_MODE_INNER (mode);

      if (submode == BLKmode)
	abort ();

      if (target == 0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      target_piece = gen_imagpart (submode, target);
      x = expand_unop (submode, unoptab,
		       gen_imagpart (submode, op0),
		       target_piece, unsignedp);
      if (target_piece != x)
	emit_move_insn (target_piece, x);

      target_piece = gen_realpart (submode, target);
      x = expand_unop (submode, unoptab,
		       gen_realpart (submode, op0),
		       target_piece, unsignedp);
      if (target_piece != x)
	emit_move_insn (target_piece, x);

      seq = get_insns ();
      end_sequence ();

      emit_no_conflict_block (seq, target, op0, 0,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
      return target;
    }

  /* Try negating floating point values by flipping the sign bit.  */
  if (unoptab->code == NEG && class == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
    {
      const struct real_format *fmt = real_format_for_mode[mode - QFmode];
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
	{
	  HOST_WIDE_INT hi, lo;
	  rtx last = get_last_insn ();

	  /* Handle targets with different FP word orders.  */
	  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	    {
	      int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
	      int word = nwords - (bitpos / BITS_PER_WORD) - 1;
	      bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
	    }

	  if (bitpos < HOST_BITS_PER_WIDE_INT)
	    {
	      hi = 0;
	      lo = (HOST_WIDE_INT) 1 << bitpos;
	    }
	  else
	    {
	      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
	      lo = 0;
	    }
	  temp = expand_binop (imode, xor_optab,
			       gen_lowpart (imode, op0),
			       immed_double_const (lo, hi, imode),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	  if (temp != 0)
	    return gen_lowpart (mode, temp);
	  delete_insns_since (last);
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == popcount_optab || unoptab == parity_optab)
	outmode = TYPE_MODE (integer_type_node);

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unoptab->code, mode, op0));

      return target;
    }

  if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
    return expand_vector_unop (mode, unoptab, op0, target, unsignedp);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((unoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || unoptab->handlers[(int) wider_mode].libfunc)
	    {
	      rtx xop0 = op0;

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */

	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab)
				    && class == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  */
	      if (unoptab == clz_optab && temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);

	      if (temp)
		{
		  if (class != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* If there is no negate operation, try doing a subtract from zero.
     The US Software GOFAST library needs this.  */
  if (unoptab->code == NEG)
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
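/* Illustrative sketch (not part of the original file): the normal entry
   point for a unary operation.  TARGET is only a suggestion; the caller
   must use the returned rtx, which may differ from TARGET.  op0 is assumed
   to be a DImode value prepared by the caller.  */
#if 0
{
  rtx target = gen_reg_rtx (DImode);
  rtx value = expand_unop (DImode, one_cmpl_optab, op0, target, 1);

  if (value != target)
    emit_move_insn (target, value);
}
#endif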
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
    {
      const struct real_format *fmt = real_format_for_mode[mode - QFmode];
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0)
	{
	  HOST_WIDE_INT hi, lo;
	  rtx last = get_last_insn ();

	  /* Handle targets with different FP word orders.  */
	  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	    {
	      int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
	      int word = nwords - (bitpos / BITS_PER_WORD) - 1;
	      bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
	    }

	  if (bitpos < HOST_BITS_PER_WIDE_INT)
	    {
	      hi = 0;
	      lo = (HOST_WIDE_INT) 1 << bitpos;
	    }
	  else
	    {
	      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
	      lo = 0;
	    }
	  temp = expand_binop (imode, and_optab,
			       gen_lowpart (imode, op0),
			       immed_double_const (~lo, ~hi, imode),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	  if (temp != 0)
	    return gen_lowpart (mode, temp);
	  delete_insns_since (last);
	}
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
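/* Worked example of the branch-free sequence above (not part of the original
   file): for x = -5 in a 32-bit mode, the arithmetic shift x >> 31 gives -1
   (all ones), x ^ -1 is 4, and 4 - (-1) is 5, the absolute value.  For a
   non-negative x the shift gives 0, so both steps leave x unchanged.  */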
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && GET_CODE (op0) == REG
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
      || (GET_CODE (target) == REG
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
				  NULL_RTX, op1);
  else
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			     NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   UNSIGNEDP is relevant for complex integer modes.  */

rtx
expand_complex_abs (enum machine_mode mode, rtx op0, rtx target,
		    int unsignedp)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx temp;
  rtx entry_last = get_last_insn ();
  rtx last;
  rtx pat;
  optab this_abs_optab;

  /* Find the correct mode for the real and imaginary parts.  */
  enum machine_mode submode = GET_MODE_INNER (mode);

  if (submode == BLKmode)
    abort ();

  op0 = protect_from_queue (op0, 0);

  if (flag_force_mem)
    op0 = force_not_mem (op0);

  last = get_last_insn ();

  if (target)
    target = protect_from_queue (target, 1);

  this_abs_optab = ! unsignedp && flag_trapv
		   && (GET_MODE_CLASS(mode) == MODE_INT)
		   ? absv_optab : abs_optab;

  if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (submode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
	temp = gen_reg_rtx (submode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
				   NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, this_abs_optab, op0, NULL_RTX,
				  unsignedp);
	    }

	  emit_insn (pat);

	  return temp;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    {
      if (this_abs_optab->handlers[(int) wider_mode].insn_code
	  != CODE_FOR_nothing)
	{
	  rtx xop0 = op0;

	  xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
	  temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

	  if (temp)
	    {
	      if (class != MODE_COMPLEX_INT)
		{
		  if (target == 0)
		    target = gen_reg_rtx (submode);
		  convert_move (target, temp, 0);
		  return target;
		}
	      else
		return gen_lowpart (submode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }

  /* Open-code the complex absolute-value operation
     if we can open-code sqrt.  Otherwise it's not worth while.  */
  if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing
      && ! flag_trapv)
    {
      rtx real, imag, total;

      real = gen_realpart (submode, op0);
      imag = gen_imagpart (submode, op0);

      /* Square both parts.  */
      real = expand_mult (submode, real, real, NULL_RTX, 0);
      imag = expand_mult (submode, imag, imag, NULL_RTX, 0);

      /* Sum the parts.  */
      total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
			    0, OPTAB_LIB_WIDEN);

      /* Get sqrt in TARGET.  Set TARGET to where the result is.  */
      target = expand_unop (submode, sqrt_optab, total, target, 0);
      if (target == 0)
	delete_insns_since (last);
      else
	return target;
    }

  /* Now try a library call in this mode.  */
  if (this_abs_optab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, submode, 1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (submode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (this_abs_optab->code, mode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    {
      if ((this_abs_optab->handlers[(int) wider_mode].insn_code
	   != CODE_FOR_nothing)
	  || this_abs_optab->handlers[(int) wider_mode].libfunc)
	{
	  rtx xop0 = op0;

	  xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);

	  temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

	  if (temp)
	    {
	      if (class != MODE_COMPLEX_INT)
		{
		  if (target == 0)
		    target = gen_reg_rtx (submode);
		  convert_move (target, temp, 0);
		  return target;
		}
	      else
		return gen_lowpart (submode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target = protect_from_queue (target, 1);

  op0 = protect_from_queue (op0, 0);

  /* Sign and zero extension from memory is often done specially on
     RISC machines, so forcing into a register here can pessimize
     code.  */
  if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
    op0 = force_not_mem (op0);

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
      || (flag_force_mem && GET_CODE (temp) == MEM))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (GET_CODE (target) != REG || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) != INSN
	  || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx set = 0, note;
      int i;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create an libcall regions at their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER)
	set = PATTERN (insn);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	{
	  for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      {
		set = XVECEXP (PATTERN (insn), 0, i);
		break;
	      }
	}
      else
	abort ();

      if (set == 0)
	abort ();

      if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  add_insn (insn);
	}
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && GET_CODE (op1) == REG)
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && GET_CODE (op0) == REG)
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
	set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
					 REG_NOTES (first));
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REQ_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	    if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	      remove_note (insn, note);
	  }
    }
  else
    /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0)
	    XEXP (note, 0) = GEN_INT (-1);
	  else
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1),
						  REG_NOTES (insn));
	}

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create an libcall regions at their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	  && (insn == insns
	      || ((! INSN_P(insns)
		   || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
		  && ! reg_used_between_p (SET_DEST (set), insns, insn)
		  && ! modified_in_p (SET_SRC (set), insns)
		  && ! modified_between_p (SET_SRC (set), insns, insn))))
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  add_insn (insn);
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (GET_CODE (insn) == CODE_LABEL)
	break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes
	 when the encapsulated region would not be in one basic block,
	 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
       */
      bool attach_libcall_retval_notes = true;
      next = NEXT_INSN (last);
      for (insn = first; insn != next; insn = NEXT_INSN (insn))
	if (control_flow_insn_p (insn))
	  {
	    attach_libcall_retval_notes = false;
	    break;
	  }

      if (attach_libcall_retval_notes)
	{
	  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
						 REG_NOTES (first));
	  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
						REG_NOTES (last));
	}
    }
}
/* Generate code to store zero in X.  */

void
emit_clr_insn (rtx x)
{
  emit_move_insn (x, const0_rtx);
}

/* Generate code to store 1 in X
   assuming it contains zero beforehand.  */

void
emit_0_to_1_insn (rtx x)
{
  emit_move_insn (x, const1_rtx);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
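/* Illustrative sketch (not part of the original file): callers test
   can_compare_p before emitting a comparison, so that integers too wide for
   a direct compare can be routed to a word-by-word jump sequence instead,
   in the same way expand_abs does above.  x, mode and label are assumed to
   be set up by the caller.  */
#if 0
{
  if (can_compare_p (GE, mode, ccp_jump))
    emit_cmp_and_jump_insns (x, const0_rtx, GE, NULL_RTX, mode, 0, label);
  else
    do_jump_by_parts_greater_rtx (mode, 0, x, const0_rtx, NULL_RTX, label);
}
#endif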
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* They could both be VOIDmode if both args are immediate constants,
     but we should fold that at an earlier stage.
     With no special code here, this will call abort,
     reminding the programmer to implement such folding.  */

  if (mode != BLKmode && flag_force_mem)
    {
      /* Load duplicate non-volatile operands once.  */
      if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
	{
	  x = force_not_mem (x);
	  y = x;
	}
      else
	{
	  x = force_not_mem (x);
	  y = force_not_mem (y);
	}
    }

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (x) && preserve_subexpressions_p ()
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && preserve_subexpressions_p ()
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Abort if we have a non-canonical comparison.  The RTL documentation
     states that canonical comparisons are required only for targets which
     have cc0.  */
  if (CONSTANT_P (x) && ! CONSTANT_P (y))
    abort ();
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      rtx result;
      enum machine_mode result_mode;
      rtx opalign ATTRIBUTE_UNUSED
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      emit_queue ();
      x = protect_from_queue (x, 0);
      y = protect_from_queue (y, 0);

      if (size == 0)
	abort ();
#ifdef HAVE_cmpmemqi
      if (HAVE_cmpmemqi
	  && GET_CODE (size) == CONST_INT
	  && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpmemqi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  emit_insn (gen_cmpmemqi (result, x, y, size, opalign));
	}
      else
#endif
#ifdef HAVE_cmpmemhi
      if (HAVE_cmpmemhi
	  && GET_CODE (size) == CONST_INT
	  && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpmemhi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  emit_insn (gen_cmpmemhi (result, x, y, size, opalign));
	}
      else
#endif
#ifdef HAVE_cmpmemsi
      if (HAVE_cmpmemsi)
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpmemsi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = protect_from_queue (size, 0);
	  emit_insn (gen_cmpmemsi (result, x, y,
				   convert_to_mode (SImode, size, 1),
				   opalign));
	}
      else
#endif
#ifdef HAVE_cmpstrqi
      if (HAVE_cmpstrqi
	  && GET_CODE (size) == CONST_INT
	  && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpstrqi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  emit_insn (gen_cmpstrqi (result, x, y, size, opalign));
	}
      else
#endif
#ifdef HAVE_cmpstrhi
      if (HAVE_cmpstrhi
	  && GET_CODE (size) == CONST_INT
	  && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpstrhi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  emit_insn (gen_cmpstrhi (result, x, y, size, opalign));
	}
      else
#endif
#ifdef HAVE_cmpstrsi
      if (HAVE_cmpstrsi)
	{
	  result_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = protect_from_queue (size, 0);
	  emit_insn (gen_cmpstrsi (result, x, y,
				   convert_to_mode (SImode, size, 1),
				   opalign));
	}
      else
#endif
	{
#ifdef TARGET_MEM_FUNCTIONS
	  result = emit_library_call_value (memcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
					    TYPE_MODE (integer_type_node), 3,
					    XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
					    convert_to_mode (TYPE_MODE (sizetype), size,
							     TREE_UNSIGNED (sizetype)),
					    TYPE_MODE (sizetype));
#else
	  result = emit_library_call_value (bcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
					    TYPE_MODE (integer_type_node), 3,
					    XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
					    convert_to_mode (TYPE_MODE (integer_type_node),
							     size,
							     TREE_UNSIGNED (integer_type_node)),
					    TYPE_MODE (integer_type_node));
#endif

	  result_mode = TYPE_MODE (integer_type_node);
	}

      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
	libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					word_mode, 2, x, mode, y, mode);

      /* Integer comparison returns a result that must be compared against 1,
	 so that even if we do an unsigned compare afterward,
	 there is still a value that can represent the result "less than".  */
      *px = result;
      *py = const1_rtx;
      *pmode = word_mode;
      return;
    }

  if (class == MODE_FLOAT)
    prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);

  else
    abort ();
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  x = protect_from_queue (x, 0);

  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
	{
	  icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

	  if (icode != CODE_FOR_nothing
	      && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
	    {
	      x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	      y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	      emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	      return;
	    }
	}

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  if (label)
	    emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  if (label)
	    emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
	  return;
	}

      if (class != MODE_INT && class != MODE_FLOAT
	  && class != MODE_COMPLEX_FLOAT)
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  abort ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
	 is out of sync.  */
      if (! label)
	abort ();

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical
     RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  emit_queue ();
  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}

/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
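/* Illustrative sketch (not part of the original file): a guarded jump,
   "if (a < b) goto label", through the canonicalizing entry point.  a and b
   are assumed to be SImode operands prepared by the caller.  */
#if 0
{
  rtx label = gen_label_rtx ();

  emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 0, label);
  /* ... code emitted here runs only when a >= b ... */
  emit_label (label);
}
#endif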
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
		       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  rtx result;
  rtx x = *px = protect_from_queue (*px, 0);
  rtx y = *py = protect_from_queue (*py, 0);
  enum machine_mode mode = GET_MODE (x);
  rtx tmp;
  rtx libfunc = 0;

  if (mode == HFmode)
    switch (comparison)
      {
      case EQ: libfunc = eqhf2_libfunc; break;
      case NE: libfunc = nehf2_libfunc; break;
      case GT:
	libfunc = gthf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LT; libfunc = lthf2_libfunc; }
	break;
      case GE:
	libfunc = gehf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LE; libfunc = lehf2_libfunc; }
	break;
      case LT:
	libfunc = lthf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GT; libfunc = gthf2_libfunc; }
	break;
      case LE:
	libfunc = lehf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GE; libfunc = gehf2_libfunc; }
	break;
      case UNORDERED: libfunc = unordhf2_libfunc; break;
      default: break;
      }
  else if (mode == SFmode)
    switch (comparison)
      {
      case EQ: libfunc = eqsf2_libfunc; break;
      case NE: libfunc = nesf2_libfunc; break;
      case GT:
	libfunc = gtsf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LT; libfunc = ltsf2_libfunc; }
	break;
      case GE:
	libfunc = gesf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LE; libfunc = lesf2_libfunc; }
	break;
      case LT:
	libfunc = ltsf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GT; libfunc = gtsf2_libfunc; }
	break;
      case LE:
	libfunc = lesf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GE; libfunc = gesf2_libfunc; }
	break;
      case UNORDERED: libfunc = unordsf2_libfunc; break;
      default: break;
      }
  else if (mode == DFmode)
    switch (comparison)
      {
      case EQ: libfunc = eqdf2_libfunc; break;
      case NE: libfunc = nedf2_libfunc; break;
      case GT:
	libfunc = gtdf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LT; libfunc = ltdf2_libfunc; }
	break;
      case GE:
	libfunc = gedf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LE; libfunc = ledf2_libfunc; }
	break;
      case LT:
	libfunc = ltdf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GT; libfunc = gtdf2_libfunc; }
	break;
      case LE:
	libfunc = ledf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GE; libfunc = gedf2_libfunc; }
	break;
      case UNORDERED: libfunc = unorddf2_libfunc; break;
      default: break;
      }
  else if (mode == XFmode)
    switch (comparison)
      {
      case EQ: libfunc = eqxf2_libfunc; break;
      case NE: libfunc = nexf2_libfunc; break;
      case GT:
	libfunc = gtxf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LT; libfunc = ltxf2_libfunc; }
	break;
      case GE:
	libfunc = gexf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LE; libfunc = lexf2_libfunc; }
	break;
      case LT:
	libfunc = ltxf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GT; libfunc = gtxf2_libfunc; }
	break;
      case LE:
	libfunc = lexf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GE; libfunc = gexf2_libfunc; }
	break;
      case UNORDERED: libfunc = unordxf2_libfunc; break;
      default: break;
      }
  else if (mode == TFmode)
    switch (comparison)
      {
      case EQ: libfunc = eqtf2_libfunc; break;
      case NE: libfunc = netf2_libfunc; break;
      case GT:
	libfunc = gttf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LT; libfunc = lttf2_libfunc; }
	break;
      case GE:
	libfunc = getf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = LE; libfunc = letf2_libfunc; }
	break;
      case LT:
	libfunc = lttf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GT; libfunc = gttf2_libfunc; }
	break;
      case LE:
	libfunc = letf2_libfunc;
	if (libfunc == NULL_RTX)
	  { tmp = x; x = y; y = tmp; *pcomparison = GE; libfunc = getf2_libfunc; }
	break;
      case UNORDERED: libfunc = unordtf2_libfunc; break;
      default: break;
      }

  if (libfunc == 0)
    {
      /* No library routine for this mode; widen the operands and retry
	 in a wider mode that has one.  */
      enum machine_mode wider_mode;

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((cmp_optab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || (cmp_optab->handlers[(int) wider_mode].libfunc != 0))
	    {
	      x = protect_from_queue (x, 0);
	      y = protect_from_queue (y, 0);
	      *px = convert_to_mode (wider_mode, x, 0);
	      *py = convert_to_mode (wider_mode, y, 0);
	      prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
	      return;
	    }
	}
      abort ();
    }

  result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
				    word_mode, 2, x, mode, y, mode);

  *px = result;
  *py = const0_rtx;
  *pmode = word_mode;
  if (comparison == UNORDERED)
    *py = const1_rtx;
#ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
  else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    *py = const1_rtx;
#endif
  *punsignedp = 0;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
	 (loc, Pmode)))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode cmode, rtx op2, rtx op3,
		       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (flag_force_mem)
    {
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);
    }

  if (target)
    target = protect_from_queue (target, 1);
  else
    target = gen_reg_rtx (mode);

  subtarget = target;

  emit_queue ();

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integers values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
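/* Illustrative sketch (not part of the original file, and only meaningful
   when HAVE_conditional_move is defined): computing MAX (a, b) without a
   branch.  a and b are assumed to be SImode operands prepared by the
   caller; a NULL_RTX return means the target has no usable conditional-move
   pattern and the caller must emit a compare-and-branch sequence instead.  */
#if 0
{
  rtx target = gen_reg_rtx (SImode);
  rtx res = emit_conditional_move (target, GT, a, b, SImode,
				   a, b, SImode, 0);

  if (res == NULL_RTX)
    {
      /* Fall back to a compare-and-branch sequence.  */
    }
}
#endif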
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (flag_force_mem)
    {
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);
    }

  if (target)
    target = protect_from_queue (target, 1);
  else
    target = gen_reg_rtx (mode);

  subtarget = target;

  emit_queue ();

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.

   They do not protect from queued increments,
   because they may be used 1) in protect_from_queue itself
   and 2) in other passes where there is no queue.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
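
/* Illustrative sketch (added for clarity, not part of the original source):
   a pass that wants to increment register X by Y without going through the
   general expanders can pair the two helpers above, e.g.

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));
     else
       ... fall back to a multi-insn or library sequence ...

   have_add2_insn performs the same operand-predicate checks, so the abort
   in gen_add2_insn cannot trigger on that path.  */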

/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));
}
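
/* Illustrative note (added for clarity, not part of the original source):
   on a target that provides a zero_extendsidi2 pattern,
   gen_extend_insn (x, y, DImode, SImode, 1) returns the body of that pattern
   with X as destination and Y as source, while can_extend_p (DImode, SImode, 1)
   merely reports whether such an entry was recorded in extendtab.  */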

/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  *truncp_ptr = 0;
  if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
      != CODE_FOR_nothing)
    return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];

  if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];
    }

  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
}

/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)
    abort ();

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
          icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      emit_queue ();

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (flag_force_mem)
        from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (GET_CODE (target) != REG
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine to convert from
     SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode.  */

    {
      rtx libfcn;
      rtx insns;
      rtx value;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      if (flag_force_mem)
        from = force_not_mem (from);

      if (GET_MODE (to) == SFmode)
        {
          if (GET_MODE (from) == SImode)
            libfcn = floatsisf_libfunc;
          else if (GET_MODE (from) == DImode)
            libfcn = floatdisf_libfunc;
          else if (GET_MODE (from) == TImode)
            libfcn = floattisf_libfunc;
          else
            abort ();
        }
      else if (GET_MODE (to) == DFmode)
        {
          if (GET_MODE (from) == SImode)
            libfcn = floatsidf_libfunc;
          else if (GET_MODE (from) == DImode)
            libfcn = floatdidf_libfunc;
          else if (GET_MODE (from) == TImode)
            libfcn = floattidf_libfunc;
          else
            abort ();
        }
      else if (GET_MODE (to) == XFmode)
        {
          if (GET_MODE (from) == SImode)
            libfcn = floatsixf_libfunc;
          else if (GET_MODE (from) == DImode)
            libfcn = floatdixf_libfunc;
          else if (GET_MODE (from) == TImode)
            libfcn = floattixf_libfunc;
          else
            abort ();
        }
      else if (GET_MODE (to) == TFmode)
        {
          if (GET_MODE (from) == SImode)
            libfcn = floatsitf_libfunc;
          else if (GET_MODE (from) == DImode)
            libfcn = floatditf_libfunc;
          else if (GET_MODE (from) == TImode)
            libfcn = floattitf_libfunc;
          else
            abort ();
        }
      else
        abort ();

      start_sequence ();

      value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}

/* expand_fix: generate code to convert FROM to fixed point
   and store in TO.  FROM must be floating point.  */

static rtx
ftruncify (rtx x)
{
  rtx temp = gen_reg_rtx (GET_MODE (x));
  return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
}

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;
  rtx libfcn = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              from = ftruncify (from);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For other inputs, overflow happens and the result is
     undefined.)  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
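
  /* Worked example (added for clarity, not part of the original source):
     converting a DFmode value X to an unsigned 64-bit integer when only a
     signed fix exists proceeds as

       if (X < 2^63)   result = (signed fix) X;
       else            result = ((signed fix) (X - 2^63)) XOR 2^63;

     e.g. X = 2^63 + 8 is first reduced to 8.0, fixed to the integer 8, and
     the final XOR of the sign bit restores 2^63 + 8.  */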

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          emit_queue ();

          to = protect_from_queue (to, 1);
          from = protect_from_queue (from, 0);

          if (flag_force_mem)
            from = force_not_mem (from);

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else if (GET_MODE (from) == SFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == DFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == XFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == TFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
      else
        abort ();
    }
  else
    abort ();

  if (libfcn)
    {
      rtx insns;
      rtx value;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (flag_force_mem)
        from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}

/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
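
/* Illustrative note (added for clarity, not part of the original source):
   have_insn_for (PLUS, SImode) is nonzero exactly when the target's addsi3
   pattern filled in add_optab's SImode handler, so callers can ask "can this
   operation be open-coded in this mode?" without touching the optab tables
   directly.  */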

/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lower case) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
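
/* Worked example (added for clarity, not part of the original source):
   init_libfuncs (add_optab, SImode, DImode, "add", '3') builds the strings
   "__addsi3" and "__adddi3" -- two leading underscores, the operation name,
   the lower-cased mode name, and the operand-count suffix -- and records
   them as the SImode and DImode libfuncs of add_optab.  */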

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}
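
/* Illustrative note (added for clarity, not part of the original source):
   on a 32-bit target with a 64-bit `long long', word_mode is SImode and
   maxsize is 64, so the call above fills in libfuncs for the integer modes
   from SImode up through DImode; wider or narrower modes keep a null
   libfunc.  */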

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  enum machine_mode fmode, dmode, lmode;

  fmode = float_type_node ? TYPE_MODE (float_type_node) : VOIDmode;
  dmode = double_type_node ? TYPE_MODE (double_type_node) : VOIDmode;
  lmode = long_double_type_node ? TYPE_MODE (long_double_type_node) : VOIDmode;

  if (fmode != VOIDmode)
    init_libfuncs (optable, fmode, fmode, opname, suffix);
  if (dmode != fmode && dmode != VOIDmode)
    init_libfuncs (optable, dmode, dmode, opname, suffix);
  if (lmode != dmode && lmode != VOIDmode)
    init_libfuncs (optable, lmode, lmode, opname, suffix);
}

rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}

/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i, j, k;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < ARRAY_SIZE (fixtab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
        fixtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
        fixtrunctab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (floattab); i++)
    for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
        floattab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (extendtab); i++)
    for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
        extendtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  trunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
  /* This flag says the same insns that convert to a signed fixnum
     also convert validly to an unsigned one.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
#endif

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

#ifdef MULSI3_LIBCALL
  smul_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MULSI3_LIBCALL);
#endif
#ifdef MULDI3_LIBCALL
  smul_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MULDI3_LIBCALL);
#endif

#ifdef DIVSI3_LIBCALL
  sdiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (DIVSI3_LIBCALL);
#endif
#ifdef DIVDI3_LIBCALL
  sdiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (DIVDI3_LIBCALL);
#endif

#ifdef UDIVSI3_LIBCALL
  udiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UDIVSI3_LIBCALL);
#endif
#ifdef UDIVDI3_LIBCALL
  udiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UDIVDI3_LIBCALL);
#endif

#ifdef MODSI3_LIBCALL
  smod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MODSI3_LIBCALL);
#endif
#ifdef MODDI3_LIBCALL
  smod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MODDI3_LIBCALL);
#endif

#ifdef UMODSI3_LIBCALL
  umod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UMODSI3_LIBCALL);
#endif
#ifdef UMODDI3_LIBCALL
  umod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UMODDI3_LIBCALL);
#endif

  /* Use cabs for DC complex abs, since systems generally have cabs.
     Don't define any libcall for SCmode, so that cabs will be used.  */
  abs_optab->handlers[(int) DCmode].libfunc
    = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
  extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
  extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
  extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
  extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");

  truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
  truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
  trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
  truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
  trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");
  setbits_libfunc = init_one_libfunc ("__setbits");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
                                            ? "_Unwind_SjLj_Resume"
                                            : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  eqhf2_libfunc = init_one_libfunc ("__eqhf2");
  nehf2_libfunc = init_one_libfunc ("__nehf2");
  gthf2_libfunc = init_one_libfunc ("__gthf2");
  gehf2_libfunc = init_one_libfunc ("__gehf2");
  lthf2_libfunc = init_one_libfunc ("__lthf2");
  lehf2_libfunc = init_one_libfunc ("__lehf2");
  unordhf2_libfunc = init_one_libfunc ("__unordhf2");

  eqsf2_libfunc = init_one_libfunc ("__eqsf2");
  nesf2_libfunc = init_one_libfunc ("__nesf2");
  gtsf2_libfunc = init_one_libfunc ("__gtsf2");
  gesf2_libfunc = init_one_libfunc ("__gesf2");
  ltsf2_libfunc = init_one_libfunc ("__ltsf2");
  lesf2_libfunc = init_one_libfunc ("__lesf2");
  unordsf2_libfunc = init_one_libfunc ("__unordsf2");

  eqdf2_libfunc = init_one_libfunc ("__eqdf2");
  nedf2_libfunc = init_one_libfunc ("__nedf2");
  gtdf2_libfunc = init_one_libfunc ("__gtdf2");
  gedf2_libfunc = init_one_libfunc ("__gedf2");
  ltdf2_libfunc = init_one_libfunc ("__ltdf2");
  ledf2_libfunc = init_one_libfunc ("__ledf2");
  unorddf2_libfunc = init_one_libfunc ("__unorddf2");

  eqxf2_libfunc = init_one_libfunc ("__eqxf2");
  nexf2_libfunc = init_one_libfunc ("__nexf2");
  gtxf2_libfunc = init_one_libfunc ("__gtxf2");
  gexf2_libfunc = init_one_libfunc ("__gexf2");
  ltxf2_libfunc = init_one_libfunc ("__ltxf2");
  lexf2_libfunc = init_one_libfunc ("__lexf2");
  unordxf2_libfunc = init_one_libfunc ("__unordxf2");

  eqtf2_libfunc = init_one_libfunc ("__eqtf2");
  netf2_libfunc = init_one_libfunc ("__netf2");
  gttf2_libfunc = init_one_libfunc ("__gttf2");
  getf2_libfunc = init_one_libfunc ("__getf2");
  lttf2_libfunc = init_one_libfunc ("__lttf2");
  letf2_libfunc = init_one_libfunc ("__letf2");
  unordtf2_libfunc = init_one_libfunc ("__unordtf2");

  floatsisf_libfunc = init_one_libfunc ("__floatsisf");
  floatdisf_libfunc = init_one_libfunc ("__floatdisf");
  floattisf_libfunc = init_one_libfunc ("__floattisf");

  floatsidf_libfunc = init_one_libfunc ("__floatsidf");
  floatdidf_libfunc = init_one_libfunc ("__floatdidf");
  floattidf_libfunc = init_one_libfunc ("__floattidf");

  floatsixf_libfunc = init_one_libfunc ("__floatsixf");
  floatdixf_libfunc = init_one_libfunc ("__floatdixf");
  floattixf_libfunc = init_one_libfunc ("__floattixf");

  floatsitf_libfunc = init_one_libfunc ("__floatsitf");
  floatditf_libfunc = init_one_libfunc ("__floatditf");
  floattitf_libfunc = init_one_libfunc ("__floattitf");

  fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
  fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
  fixsfti_libfunc = init_one_libfunc ("__fixsfti");

  fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
  fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
  fixdfti_libfunc = init_one_libfunc ("__fixdfti");

  fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
  fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
  fixxfti_libfunc = init_one_libfunc ("__fixxfti");

  fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
  fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
  fixtfti_libfunc = init_one_libfunc ("__fixtfti");

  fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
  fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
  fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");

  fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
  fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
  fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");

  fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
  fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
  fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");

  fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
  fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
  fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
  gcov_init_libfunc = init_one_libfunc ("__gcov_init");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

#ifdef INIT_TARGET_OPTABS
  /* Allow the target to add more libcalls or rename some, etc.  */
  INIT_TARGET_OPTABS;
#endif
}

/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}

#include "gt-optabs.h"