1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this chunk is a garbled extraction of GCC's optabs.c.
   Original source lines are hard-wrapped mid-statement, the leading
   integers are the original file's line numbers (not program text), and
   some original lines are missing (the embedded numbering skips).
   Comments below are added by review; every other token is kept exactly
   as found.  */
/* Global dispatch tables mapping operations/modes to insn patterns.  */
47 /* Each optab contains info on how this target machine
48 can perform a particular operation
49 for all sizes and kinds of operands.
51 The operation to be performed is often specified
52 by passing one of these optabs as an argument.
54 See expr.h for documentation of these optabs. */
/* One optab per operation (indexed by OTI_* up to OTI_MAX).  */
56 optab optab_table
[OTI_MAX
];
/* Library-call rtx for each LTI_* library function index.  */
58 rtx libfunc_table
[LTI_MAX
];
60 /* Tables of patterns for extending one integer mode to another. */
/* Indexed [from mode][to mode][unsignedp].  */
61 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
63 /* Tables of patterns for converting between fixed and floating point. */
64 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
65 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
66 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
68 /* Contains the optab used for each rtx code. */
69 optab code_to_optab
[NUM_RTX_CODE
+ 1];
71 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
72 gives the gen_function to make a branch to test that condition. */
74 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
76 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
77 gives the insn code to make a store-condition insn
78 to test that condition. */
80 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
82 #ifdef HAVE_conditional_move
/* NOTE(review): the matching #endif (original line ~90) is missing from
   this extraction.  */
83 /* Indexed by the machine mode, gives the insn code to make a conditional
84 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
85 setcc_gen_code to cut down on the number of named patterns. Consider a day
86 when a lot more rtx codes are conditional (eg: for the ARM). */
88 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* Forward declarations of this file's static helpers, in old-style
   PARAMS(()) form (pre-C89-prototype compatibility macro).
   NOTE(review): several declarations below are visibly truncated by the
   extraction (e.g. can_fix_p, can_float_p, expand_vector_unop lack their
   closing argument lists, and the #endif for HAVE_conditional_trap is
   missing) -- kept exactly as found.  */
91 static int add_equal_note
PARAMS ((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
92 static rtx widen_operand
PARAMS ((rtx
, enum machine_mode
,
93 enum machine_mode
, int, int));
94 static int expand_cmplxdiv_straight
PARAMS ((rtx
, rtx
, rtx
, rtx
,
95 rtx
, rtx
, enum machine_mode
,
96 int, enum optab_methods
,
97 enum mode_class
, optab
));
98 static int expand_cmplxdiv_wide
PARAMS ((rtx
, rtx
, rtx
, rtx
,
99 rtx
, rtx
, enum machine_mode
,
100 int, enum optab_methods
,
101 enum mode_class
, optab
));
102 static void prepare_cmp_insn
PARAMS ((rtx
*, rtx
*, enum rtx_code
*, rtx
,
103 enum machine_mode
*, int *,
104 enum can_compare_purpose
));
/* NOTE(review): truncated declaration -- original continuation lines
   missing here.  */
105 static enum insn_code can_fix_p
PARAMS ((enum machine_mode
, enum machine_mode
,
107 static enum insn_code can_float_p
PARAMS ((enum machine_mode
,
110 static rtx ftruncify
PARAMS ((rtx
));
111 static optab new_optab
PARAMS ((void));
112 static inline optab init_optab
PARAMS ((enum rtx_code
));
113 static inline optab init_optabv
PARAMS ((enum rtx_code
));
114 static void init_libfuncs
PARAMS ((optab
, int, int, const char *, int));
115 static void init_integral_libfuncs
PARAMS ((optab
, const char *, int));
116 static void init_floating_libfuncs
PARAMS ((optab
, const char *, int));
117 #ifdef HAVE_conditional_trap
118 static void init_traps
PARAMS ((void));
120 static void emit_cmp_and_jump_insn_1
PARAMS ((rtx
, rtx
, enum machine_mode
,
121 enum rtx_code
, int, rtx
));
122 static void prepare_float_lib_cmp
PARAMS ((rtx
*, rtx
*, enum rtx_code
*,
123 enum machine_mode
*, int *));
124 static rtx expand_vector_binop
PARAMS ((enum machine_mode
, optab
,
126 enum optab_methods
));
/* NOTE(review): truncated declaration -- continuation missing.  */
127 static rtx expand_vector_unop
PARAMS ((enum machine_mode
, optab
, rtx
, rtx
,
129 static rtx widen_clz
PARAMS ((enum machine_mode
, rtx
, rtx
));
130 static rtx expand_parity
PARAMS ((enum machine_mode
, rtx
, rtx
));
132 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
133 the result of operation CODE applied to OP0 (and OP1 if it is a binary
136 If the last insn does not set TARGET, don't do anything, but return 1.
138 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
139 don't add the REG_EQUAL note but return 0. Our caller can then try
140 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): K&R-style definition; the parameter declaration lines,
   the return type line, and several early-exit statements (original
   lines 144-148, 150-156, ...) are missing from this extraction, so the
   control flow shown here is incomplete.  */
143 add_equal_note (insns
, target
, code
, op0
, op1
)
149 rtx last_insn
, insn
, set
;
/* Guard: a sequence of fewer than two insns gets no note (the leading
   condition of this || is among the missing lines).  */
154 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only unary ('1'), binary ('2'), commutative ('c') and comparison
   ('<') rtx classes can carry a meaningful REG_EQUAL expression.  */
157 if (GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
158 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Walk to the last insn of the sequence.  */
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
/* The note only makes sense if the last insn actually sets TARGET
   (possibly wrapped in a STRICT_LOW_PART).  */
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan backwards over the earlier insns of the sequence.  */
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
/* Build the note: unary codes take one operand, all others two.  */
194 if (GET_RTX_CLASS (code
) == '1')
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
/* NOTE(review): extraction is missing this function's return-type line,
   remaining parameter declarations and the trailing "return result;"
   (original lines around 212-218, 242-243).  */
211 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
213 enum machine_mode mode
, oldmode
;
219 /* If we don't have to extend and this is a constant, return it. */
220 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
223 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
224 extend since it will be more efficient to do so unless the signedness of
225 a promoted object differs from our extension. */
/* (First alternative of this || -- presumably "if (! no_extend" -- is
   among the missing lines.)  */
227 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
228 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
229 return convert_modes (mode
, oldmode
, op
, unsignedp
);
231 /* If MODE is no wider than a single word, we return a paradoxical
233 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
234 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
236 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* Clobber first so flow knows the whole register is written, then fill
   the low part; the high-order bits stay undefined (NO_EXTEND case).  */
239 result
= gen_reg_rtx (mode
);
240 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
241 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
245 /* Generate code to perform a straightforward complex divide. */
/* Expands (real0 + i*imag0) / (real1 + i*imag1) into REALR/IMAGR using
   the textbook formula with divisor c*c + d*d.  Uses the trapping
   (overflow-checking) optabs when BINOPTAB is sdivv_optab.
   NOTE(review): many lines (braces, declarations of temp1/temp2/res/
   divisor/real_t/imag_t, early "return 0;" statements, the final
   "return 1;") are missing from this extraction; code kept as found.  */
248 expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
249 unsignedp
, methods
, class, binoptab
)
250 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
251 enum machine_mode submode
;
253 enum optab_methods methods
;
254 enum mode_class
class;
261 optab this_add_optab
= add_optab
;
262 optab this_sub_optab
= sub_optab
;
263 optab this_neg_optab
= neg_optab
;
264 optab this_mul_optab
= smul_optab
;
/* For trapping signed division, switch all component ops to the
   overflow-trapping variants.  */
266 if (binoptab
== sdivv_optab
)
268 this_add_optab
= addv_optab
;
269 this_sub_optab
= subv_optab
;
270 this_neg_optab
= negv_optab
;
271 this_mul_optab
= smulv_optab
;
274 /* Don't fetch these from memory more than once. */
275 real0
= force_reg (submode
, real0
);
276 real1
= force_reg (submode
, real1
);
279 imag0
= force_reg (submode
, imag0
);
281 imag1
= force_reg (submode
, imag1
);
283 /* Divisor: c*c + d*d. */
284 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
285 NULL_RTX
, unsignedp
, methods
);
287 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
288 NULL_RTX
, unsignedp
, methods
);
290 if (temp1
== 0 || temp2
== 0)
293 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
294 NULL_RTX
, unsignedp
, methods
);
/* Special case: purely real dividend (imag0 absent/zero).  */
300 /* Mathematically, ((a)(c-id))/divisor. */
301 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
303 /* Calculate the dividend. */
304 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
305 NULL_RTX
, unsignedp
, methods
);
307 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
308 NULL_RTX
, unsignedp
, methods
);
310 if (real_t
== 0 || imag_t
== 0)
313 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
314 NULL_RTX
, unsignedp
);
/* General case: full complex dividend.  */
318 /* Mathematically, ((a+ib)(c-id))/divider. */
319 /* Calculate the dividend. */
320 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
321 NULL_RTX
, unsignedp
, methods
);
323 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
324 NULL_RTX
, unsignedp
, methods
);
326 if (temp1
== 0 || temp2
== 0)
329 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
330 NULL_RTX
, unsignedp
, methods
);
332 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
333 NULL_RTX
, unsignedp
, methods
);
335 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
336 NULL_RTX
, unsignedp
, methods
);
338 if (temp1
== 0 || temp2
== 0)
341 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
342 NULL_RTX
, unsignedp
, methods
);
344 if (real_t
== 0 || imag_t
== 0)
/* Final divisions: float modes use BINOPTAB directly, integer modes go
   through expand_divmod with truncating division.  */
348 if (class == MODE_COMPLEX_FLOAT
)
349 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
350 realr
, unsignedp
, methods
);
352 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
353 real_t
, divisor
, realr
, unsignedp
);
359 emit_move_insn (realr
, res
);
361 if (class == MODE_COMPLEX_FLOAT
)
362 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
363 imagr
, unsignedp
, methods
);
365 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
366 imag_t
, divisor
, imagr
, unsignedp
);
372 emit_move_insn (imagr
, res
);
377 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Smith's algorithm: compare |c| and |d|, then scale by the smaller
   ratio (d/c or c/d) to avoid overflow in c*c + d*d.  Two symmetric
   arms joined by labels lab1/lab2.
   NOTE(review): braces, several declarations, "return 0;" failure
   paths, the emit_label calls and the final "return 1;" are missing
   from this extraction; code kept as found.  */
380 expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
381 unsignedp
, methods
, class, binoptab
)
382 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
383 enum machine_mode submode
;
385 enum optab_methods methods
;
386 enum mode_class
class;
391 rtx temp1
, temp2
, lab1
, lab2
;
392 enum machine_mode mode
;
394 optab this_add_optab
= add_optab
;
395 optab this_sub_optab
= sub_optab
;
396 optab this_neg_optab
= neg_optab
;
397 optab this_mul_optab
= smul_optab
;
/* Trapping signed division uses the overflow-checking optabs.  */
399 if (binoptab
== sdivv_optab
)
401 this_add_optab
= addv_optab
;
402 this_sub_optab
= subv_optab
;
403 this_neg_optab
= negv_optab
;
404 this_mul_optab
= smulv_optab
;
407 /* Don't fetch these from memory more than once. */
408 real0
= force_reg (submode
, real0
);
409 real1
= force_reg (submode
, real1
);
412 imag0
= force_reg (submode
, imag0
);
414 imag1
= force_reg (submode
, imag1
);
416 /* XXX What's an "unsigned" complex number? */
/* Compare |real1| against |imag1| to pick the numerically safe arm.  */
424 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
425 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
428 if (temp1
== 0 || temp2
== 0)
431 mode
= GET_MODE (temp1
);
432 lab1
= gen_label_rtx ();
433 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
434 mode
, unsignedp
, lab1
);
436 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
438 if (class == MODE_COMPLEX_FLOAT
)
439 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
440 NULL_RTX
, unsignedp
, methods
);
442 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
443 imag1
, real1
, NULL_RTX
, unsignedp
);
448 /* Calculate divisor. */
/* divisor = c + d * (d/c).  */
450 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
451 NULL_RTX
, unsignedp
, methods
);
456 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
457 NULL_RTX
, unsignedp
, methods
);
462 /* Calculate dividend. */
468 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
470 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
471 NULL_RTX
, unsignedp
, methods
);
476 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
477 NULL_RTX
, unsignedp
);
479 if (real_t
== 0 || imag_t
== 0)
484 /* Compute (a+ib)/(c+id) as
485 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
487 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
488 NULL_RTX
, unsignedp
, methods
);
493 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
494 NULL_RTX
, unsignedp
, methods
);
496 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
497 NULL_RTX
, unsignedp
, methods
);
502 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
503 NULL_RTX
, unsignedp
, methods
);
505 if (real_t
== 0 || imag_t
== 0)
/* Divide the scaled dividends by the scaled divisor.  */
509 if (class == MODE_COMPLEX_FLOAT
)
510 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
511 realr
, unsignedp
, methods
);
513 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
514 real_t
, divisor
, realr
, unsignedp
);
520 emit_move_insn (realr
, res
);
522 if (class == MODE_COMPLEX_FLOAT
)
523 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
524 imagr
, unsignedp
, methods
);
526 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
527 imag_t
, divisor
, imagr
, unsignedp
);
533 emit_move_insn (imagr
, res
);
/* Skip over the second arm; lab1's emit_label is among the missing
   lines.  */
535 lab2
= gen_label_rtx ();
536 emit_jump_insn (gen_jump (lab2
));
541 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
543 if (class == MODE_COMPLEX_FLOAT
)
544 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
545 NULL_RTX
, unsignedp
, methods
);
547 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
548 real1
, imag1
, NULL_RTX
, unsignedp
);
553 /* Calculate divisor. */
/* divisor = c * (c/d) + d.  */
555 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
556 NULL_RTX
, unsignedp
, methods
);
561 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
562 NULL_RTX
, unsignedp
, methods
);
567 /* Calculate dividend. */
571 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
573 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
574 NULL_RTX
, unsignedp
, methods
);
576 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
577 NULL_RTX
, unsignedp
);
579 if (real_t
== 0 || imag_t
== 0)
584 /* Compute (a+ib)/(c+id) as
585 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
587 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
588 NULL_RTX
, unsignedp
, methods
);
593 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
594 NULL_RTX
, unsignedp
, methods
);
596 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
597 NULL_RTX
, unsignedp
, methods
);
602 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
603 NULL_RTX
, unsignedp
, methods
);
605 if (real_t
== 0 || imag_t
== 0)
609 if (class == MODE_COMPLEX_FLOAT
)
610 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
611 realr
, unsignedp
, methods
);
613 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
614 real_t
, divisor
, realr
, unsignedp
);
620 emit_move_insn (realr
, res
);
622 if (class == MODE_COMPLEX_FLOAT
)
623 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
624 imagr
, unsignedp
, methods
);
626 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
627 imag_t
, divisor
, imagr
, unsignedp
);
633 emit_move_insn (imagr
, res
);
640 /* Wrapper around expand_binop which takes an rtx code to specify
641 the operation to perform, not an optab pointer. All other
642 arguments are the same. */
/* Maps CODE through the code_to_optab[] table and forwards everything
   to expand_binop.  NOTE(review): the return-type line, most parameter
   declarations and the function braces are missing from this
   extraction; kept as found.  */
644 expand_simple_binop (mode
, code
, op0
, op1
, target
, unsignedp
, methods
)
645 enum machine_mode mode
;
650 enum optab_methods methods
;
652 optab binop
= code_to_optab
[(int) code
];
/* (A sanity check on binop -- presumably "if (binop == 0) abort ();"
   -- occupies the missing lines 653-655; verify against the original
   source.)  */
656 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
659 /* Generate code to perform an operation specified by BINOPTAB
660 on operands OP0 and OP1, with result having machine-mode MODE.
662 UNSIGNEDP is for the case where we have to widen the operands
663 to perform the operation. It says to use zero-extension.
665 If TARGET is nonzero, the value
666 is generated there, if it is convenient to do so.
667 In all cases an rtx is returned for the locus of the value;
668 this may or may not be TARGET. */
671 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
672 enum machine_mode mode
;
677 enum optab_methods methods
;
679 enum optab_methods next_methods
680 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
681 ? OPTAB_WIDEN
: methods
);
682 enum mode_class
class;
683 enum machine_mode wider_mode
;
685 int commutative_op
= 0;
686 int shift_op
= (binoptab
->code
== ASHIFT
687 || binoptab
->code
== ASHIFTRT
688 || binoptab
->code
== LSHIFTRT
689 || binoptab
->code
== ROTATE
690 || binoptab
->code
== ROTATERT
);
691 rtx entry_last
= get_last_insn ();
694 class = GET_MODE_CLASS (mode
);
696 op0
= protect_from_queue (op0
, 0);
697 op1
= protect_from_queue (op1
, 0);
699 target
= protect_from_queue (target
, 1);
703 /* Load duplicate non-volatile operands once. */
704 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
706 op0
= force_not_mem (op0
);
711 op0
= force_not_mem (op0
);
712 op1
= force_not_mem (op1
);
716 /* If subtracting an integer constant, convert this into an addition of
717 the negated constant. */
719 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
721 op1
= negate_rtx (mode
, op1
);
722 binoptab
= add_optab
;
725 /* If we are inside an appropriately-short loop and one operand is an
726 expensive constant, force it into a register. */
727 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
728 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
729 op0
= force_reg (mode
, op0
);
731 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
732 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
733 op1
= force_reg (mode
, op1
);
735 /* Record where to delete back to if we backtrack. */
736 last
= get_last_insn ();
738 /* If operation is commutative,
739 try to make the first operand a register.
740 Even better, try to make it the same as the target.
741 Also try to make the last operand a constant. */
742 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
743 || binoptab
== smul_widen_optab
744 || binoptab
== umul_widen_optab
745 || binoptab
== smul_highpart_optab
746 || binoptab
== umul_highpart_optab
)
750 if (((target
== 0 || GET_CODE (target
) == REG
)
751 ? ((GET_CODE (op1
) == REG
752 && GET_CODE (op0
) != REG
)
754 : rtx_equal_p (op1
, target
))
755 || GET_CODE (op0
) == CONST_INT
)
763 /* If we can do it with a three-operand insn, do so. */
765 if (methods
!= OPTAB_MUST_WIDEN
766 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
768 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
769 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
770 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
772 rtx xop0
= op0
, xop1
= op1
;
777 temp
= gen_reg_rtx (mode
);
779 /* If it is a commutative operator and the modes would match
780 if we would swap the operands, we can save the conversions. */
783 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
784 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
788 tmp
= op0
; op0
= op1
; op1
= tmp
;
789 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
793 /* In case the insn wants input operands in modes different from
794 those of the actual operands, convert the operands. It would
795 seem that we don't need to convert CONST_INTs, but we do, so
796 that they're properly zero-extended, sign-extended or truncated
799 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
800 xop0
= convert_modes (mode0
,
801 GET_MODE (op0
) != VOIDmode
806 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
807 xop1
= convert_modes (mode1
,
808 GET_MODE (op1
) != VOIDmode
813 /* Now, if insn's predicates don't allow our operands, put them into
816 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
817 && mode0
!= VOIDmode
)
818 xop0
= copy_to_mode_reg (mode0
, xop0
);
820 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
821 && mode1
!= VOIDmode
)
822 xop1
= copy_to_mode_reg (mode1
, xop1
);
824 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
825 temp
= gen_reg_rtx (mode
);
827 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
830 /* If PAT is composed of more than one insn, try to add an appropriate
831 REG_EQUAL note to it. If we can't because TEMP conflicts with an
832 operand, call ourselves again, this time without a target. */
833 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
834 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
836 delete_insns_since (last
);
837 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
845 delete_insns_since (last
);
848 /* If this is a multiply, see if we can do a widening operation that
849 takes operands of this mode and makes a wider mode. */
851 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
852 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
853 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
854 != CODE_FOR_nothing
))
856 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
857 unsignedp
? umul_widen_optab
: smul_widen_optab
,
858 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
862 if (GET_MODE_CLASS (mode
) == MODE_INT
)
863 return gen_lowpart (mode
, temp
);
865 return convert_to_mode (mode
, temp
, unsignedp
);
869 /* Look for a wider mode of the same class for which we think we
870 can open-code the operation. Check for a widening multiply at the
871 wider mode as well. */
873 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
874 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
875 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
876 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
878 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
879 || (binoptab
== smul_optab
880 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
881 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
882 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
883 != CODE_FOR_nothing
)))
885 rtx xop0
= op0
, xop1
= op1
;
888 /* For certain integer operations, we need not actually extend
889 the narrow operands, as long as we will truncate
890 the results to the same narrowness. */
892 if ((binoptab
== ior_optab
|| binoptab
== and_optab
893 || binoptab
== xor_optab
894 || binoptab
== add_optab
|| binoptab
== sub_optab
895 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
896 && class == MODE_INT
)
899 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
901 /* The second operand of a shift must always be extended. */
902 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
903 no_extend
&& binoptab
!= ashl_optab
);
905 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
906 unsignedp
, OPTAB_DIRECT
);
909 if (class != MODE_INT
)
912 target
= gen_reg_rtx (mode
);
913 convert_move (target
, temp
, 0);
917 return gen_lowpart (mode
, temp
);
920 delete_insns_since (last
);
924 /* These can be done a word at a time. */
925 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
927 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
928 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
934 /* If TARGET is the same as one of the operands, the REG_EQUAL note
935 won't be accurate, so use a new target. */
936 if (target
== 0 || target
== op0
|| target
== op1
)
937 target
= gen_reg_rtx (mode
);
941 /* Do the actual arithmetic. */
942 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
944 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
945 rtx x
= expand_binop (word_mode
, binoptab
,
946 operand_subword_force (op0
, i
, mode
),
947 operand_subword_force (op1
, i
, mode
),
948 target_piece
, unsignedp
, next_methods
);
953 if (target_piece
!= x
)
954 emit_move_insn (target_piece
, x
);
957 insns
= get_insns ();
960 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
962 if (binoptab
->code
!= UNKNOWN
)
964 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
965 copy_rtx (op0
), copy_rtx (op1
));
969 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
974 /* Synthesize double word shifts from single word shifts. */
975 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
976 || binoptab
== ashr_optab
)
978 && GET_CODE (op1
) == CONST_INT
979 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
980 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
981 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
982 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
984 rtx insns
, inter
, equiv_value
;
985 rtx into_target
, outof_target
;
986 rtx into_input
, outof_input
;
987 int shift_count
, left_shift
, outof_word
;
989 /* If TARGET is the same as one of the operands, the REG_EQUAL note
990 won't be accurate, so use a new target. */
991 if (target
== 0 || target
== op0
|| target
== op1
)
992 target
= gen_reg_rtx (mode
);
996 shift_count
= INTVAL (op1
);
998 /* OUTOF_* is the word we are shifting bits away from, and
999 INTO_* is the word that we are shifting bits towards, thus
1000 they differ depending on the direction of the shift and
1001 WORDS_BIG_ENDIAN. */
1003 left_shift
= binoptab
== ashl_optab
;
1004 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1006 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1007 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1009 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1010 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1012 if (shift_count
>= BITS_PER_WORD
)
1014 inter
= expand_binop (word_mode
, binoptab
,
1016 GEN_INT (shift_count
- BITS_PER_WORD
),
1017 into_target
, unsignedp
, next_methods
);
1019 if (inter
!= 0 && inter
!= into_target
)
1020 emit_move_insn (into_target
, inter
);
1022 /* For a signed right shift, we must fill the word we are shifting
1023 out of with copies of the sign bit. Otherwise it is zeroed. */
1024 if (inter
!= 0 && binoptab
!= ashr_optab
)
1025 inter
= CONST0_RTX (word_mode
);
1026 else if (inter
!= 0)
1027 inter
= expand_binop (word_mode
, binoptab
,
1029 GEN_INT (BITS_PER_WORD
- 1),
1030 outof_target
, unsignedp
, next_methods
);
1032 if (inter
!= 0 && inter
!= outof_target
)
1033 emit_move_insn (outof_target
, inter
);
1038 optab reverse_unsigned_shift
, unsigned_shift
;
1040 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1041 we must do a logical shift in the opposite direction of the
1044 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1046 /* For a shift of less than BITS_PER_WORD, to compute the word
1047 shifted towards, we need to unsigned shift the orig value of
1050 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1052 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1054 GEN_INT (BITS_PER_WORD
- shift_count
),
1055 0, unsignedp
, next_methods
);
1060 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1061 op1
, 0, unsignedp
, next_methods
);
1064 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1065 into_target
, unsignedp
, next_methods
);
1067 if (inter
!= 0 && inter
!= into_target
)
1068 emit_move_insn (into_target
, inter
);
1071 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1072 op1
, outof_target
, unsignedp
, next_methods
);
1074 if (inter
!= 0 && inter
!= outof_target
)
1075 emit_move_insn (outof_target
, inter
);
1078 insns
= get_insns ();
1083 if (binoptab
->code
!= UNKNOWN
)
1084 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1088 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1093 /* Synthesize double word rotates from single word shifts. */
1094 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1095 && class == MODE_INT
1096 && GET_CODE (op1
) == CONST_INT
1097 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1098 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1099 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1101 rtx insns
, equiv_value
;
1102 rtx into_target
, outof_target
;
1103 rtx into_input
, outof_input
;
1105 int shift_count
, left_shift
, outof_word
;
1107 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1108 won't be accurate, so use a new target. */
1109 if (target
== 0 || target
== op0
|| target
== op1
)
1110 target
= gen_reg_rtx (mode
);
1114 shift_count
= INTVAL (op1
);
1116 /* OUTOF_* is the word we are shifting bits away from, and
1117 INTO_* is the word that we are shifting bits towards, thus
1118 they differ depending on the direction of the shift and
1119 WORDS_BIG_ENDIAN. */
1121 left_shift
= (binoptab
== rotl_optab
);
1122 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1124 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1125 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1127 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1128 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1130 if (shift_count
== BITS_PER_WORD
)
1132 /* This is just a word swap. */
1133 emit_move_insn (outof_target
, into_input
);
1134 emit_move_insn (into_target
, outof_input
);
1139 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1140 rtx first_shift_count
, second_shift_count
;
1141 optab reverse_unsigned_shift
, unsigned_shift
;
1143 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1144 ? lshr_optab
: ashl_optab
);
1146 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1147 ? ashl_optab
: lshr_optab
);
1149 if (shift_count
> BITS_PER_WORD
)
1151 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1152 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1156 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1157 second_shift_count
= GEN_INT (shift_count
);
1160 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1161 outof_input
, first_shift_count
,
1162 NULL_RTX
, unsignedp
, next_methods
);
1163 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1164 into_input
, second_shift_count
,
1165 NULL_RTX
, unsignedp
, next_methods
);
1167 if (into_temp1
!= 0 && into_temp2
!= 0)
1168 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1169 into_target
, unsignedp
, next_methods
);
1173 if (inter
!= 0 && inter
!= into_target
)
1174 emit_move_insn (into_target
, inter
);
1176 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1177 into_input
, first_shift_count
,
1178 NULL_RTX
, unsignedp
, next_methods
);
1179 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1180 outof_input
, second_shift_count
,
1181 NULL_RTX
, unsignedp
, next_methods
);
1183 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1184 inter
= expand_binop (word_mode
, ior_optab
,
1185 outof_temp1
, outof_temp2
,
1186 outof_target
, unsignedp
, next_methods
);
1188 if (inter
!= 0 && inter
!= outof_target
)
1189 emit_move_insn (outof_target
, inter
);
1192 insns
= get_insns ();
1197 if (binoptab
->code
!= UNKNOWN
)
1198 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1202 /* We can't make this a no conflict block if this is a word swap,
1203 because the word swap case fails if the input and output values
1204 are in the same register. */
1205 if (shift_count
!= BITS_PER_WORD
)
1206 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1215 /* These can be done a word at a time by propagating carries. */
1216 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1217 && class == MODE_INT
1218 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1219 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1222 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1223 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1224 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1225 rtx xop0
, xop1
, xtarget
;
1227 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1228 value is one of those, use it. Otherwise, use 1 since it is the
1229 one easiest to get. */
1230 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1231 int normalizep
= STORE_FLAG_VALUE
;
1236 /* Prepare the operands. */
1237 xop0
= force_reg (mode
, op0
);
1238 xop1
= force_reg (mode
, op1
);
1240 xtarget
= gen_reg_rtx (mode
);
1242 if (target
== 0 || GET_CODE (target
) != REG
)
1245 /* Indicate for flow that the entire target reg is being set. */
1246 if (GET_CODE (target
) == REG
)
1247 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1249 /* Do the actual arithmetic. */
1250 for (i
= 0; i
< nwords
; i
++)
1252 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1253 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1254 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1255 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1258 /* Main add/subtract of the input operands. */
1259 x
= expand_binop (word_mode
, binoptab
,
1260 op0_piece
, op1_piece
,
1261 target_piece
, unsignedp
, next_methods
);
1267 /* Store carry from main add/subtract. */
1268 carry_out
= gen_reg_rtx (word_mode
);
1269 carry_out
= emit_store_flag_force (carry_out
,
1270 (binoptab
== add_optab
1273 word_mode
, 1, normalizep
);
1280 /* Add/subtract previous carry to main result. */
1281 newx
= expand_binop (word_mode
,
1282 normalizep
== 1 ? binoptab
: otheroptab
,
1284 NULL_RTX
, 1, next_methods
);
1288 /* Get out carry from adding/subtracting carry in. */
1289 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1290 carry_tmp
= emit_store_flag_force (carry_tmp
,
1291 (binoptab
== add_optab
1294 word_mode
, 1, normalizep
);
1296 /* Logical-ior the two poss. carry together. */
1297 carry_out
= expand_binop (word_mode
, ior_optab
,
1298 carry_out
, carry_tmp
,
1299 carry_out
, 0, next_methods
);
1303 emit_move_insn (target_piece
, newx
);
1306 carry_in
= carry_out
;
1309 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1311 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1312 || ! rtx_equal_p (target
, xtarget
))
1314 rtx temp
= emit_move_insn (target
, xtarget
);
1316 set_unique_reg_note (temp
,
1318 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1329 delete_insns_since (last
);
1332 /* If we want to multiply two two-word values and have normal and widening
1333 multiplies of single-word values, we can do this with three smaller
1334 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1335 because we are not operating on one word at a time.
1337 The multiplication proceeds as follows:
1338 _______________________
1339 [__op0_high_|__op0_low__]
1340 _______________________
1341 * [__op1_high_|__op1_low__]
1342 _______________________________________________
1343 _______________________
1344 (1) [__op0_low__*__op1_low__]
1345 _______________________
1346 (2a) [__op0_low__*__op1_high_]
1347 _______________________
1348 (2b) [__op0_high_*__op1_low__]
1349 _______________________
1350 (3) [__op0_high_*__op1_high_]
1353 This gives a 4-word result. Since we are only interested in the
1354 lower 2 words, partial result (3) and the upper words of (2a) and
1355 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1356 calculated using non-widening multiplication.
1358 (1), however, needs to be calculated with an unsigned widening
1359 multiplication. If this operation is not directly supported we
1360 try using a signed widening multiplication and adjust the result.
1361 This adjustment works as follows:
1363 If both operands are positive then no adjustment is needed.
1365 If the operands have different signs, for example op0_low < 0 and
1366 op1_low >= 0, the instruction treats the most significant bit of
1367 op0_low as a sign bit instead of a bit with significance
1368 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1369 with 2**BITS_PER_WORD - op0_low, and two's complements the
1370 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1373 Similarly, if both operands are negative, we need to add
1374 (op0_low + op1_low) * 2**BITS_PER_WORD.
1376 We use a trick to adjust quickly. We logically shift op0_low right
1377 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1378 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1379 logical shift exists, we do an arithmetic right shift and subtract
1382 if (binoptab
== smul_optab
1383 && class == MODE_INT
1384 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1385 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1386 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1387 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1388 != CODE_FOR_nothing
)
1389 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1390 != CODE_FOR_nothing
)))
1392 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1393 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1394 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1395 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1396 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1397 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1399 rtx op0_xhigh
= NULL_RTX
;
1400 rtx op1_xhigh
= NULL_RTX
;
1402 /* If the target is the same as one of the inputs, don't use it. This
1403 prevents problems with the REG_EQUAL note. */
1404 if (target
== op0
|| target
== op1
1405 || (target
!= 0 && GET_CODE (target
) != REG
))
1408 /* Multiply the two lower words to get a double-word product.
1409 If unsigned widening multiplication is available, use that;
1410 otherwise use the signed form and compensate. */
1412 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1414 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1415 target
, 1, OPTAB_DIRECT
);
1417 /* If we didn't succeed, delete everything we did so far. */
1419 delete_insns_since (last
);
1421 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1425 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1426 != CODE_FOR_nothing
)
1428 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1429 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1430 target
, 1, OPTAB_DIRECT
);
1431 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1432 NULL_RTX
, 1, next_methods
);
1434 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1435 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1438 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1439 NULL_RTX
, 0, next_methods
);
1441 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1442 op0_xhigh
, op0_xhigh
, 0,
1446 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1447 NULL_RTX
, 1, next_methods
);
1449 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1450 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1453 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1454 NULL_RTX
, 0, next_methods
);
1456 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1457 op1_xhigh
, op1_xhigh
, 0,
1462 /* If we have been able to directly compute the product of the
1463 low-order words of the operands and perform any required adjustments
1464 of the operands, we proceed by trying two more multiplications
1465 and then computing the appropriate sum.
1467 We have checked above that the required addition is provided.
1468 Full-word addition will normally always succeed, especially if
1469 it is provided at all, so we don't worry about its failure. The
1470 multiplication may well fail, however, so we do handle that. */
1472 if (product
&& op0_xhigh
&& op1_xhigh
)
1474 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1475 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1476 NULL_RTX
, 0, OPTAB_DIRECT
);
1478 if (!REG_P (product_high
))
1479 product_high
= force_reg (word_mode
, product_high
);
1482 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1483 product_high
, 0, next_methods
);
1485 if (temp
!= 0 && temp
!= product_high
)
1486 emit_move_insn (product_high
, temp
);
1489 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1490 NULL_RTX
, 0, OPTAB_DIRECT
);
1493 temp
= expand_binop (word_mode
, add_optab
, temp
,
1494 product_high
, product_high
,
1497 if (temp
!= 0 && temp
!= product_high
)
1498 emit_move_insn (product_high
, temp
);
1500 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1504 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1506 temp
= emit_move_insn (product
, product
);
1507 set_unique_reg_note (temp
,
1509 gen_rtx_fmt_ee (MULT
, mode
,
1518 /* If we get here, we couldn't do it for some reason even though we
1519 originally thought we could. Delete anything we've emitted in
1522 delete_insns_since (last
);
1525 /* Open-code the vector operations if we have no hardware support
1527 if (class == MODE_VECTOR_INT
|| class == MODE_VECTOR_FLOAT
)
1528 return expand_vector_binop (mode
, binoptab
, op0
, op1
, target
,
1529 unsignedp
, methods
);
1531 /* We need to open-code the complex type operations: '+, -, * and /' */
1533 /* At this point we allow operations between two similar complex
1534 numbers, and also if one of the operands is not a complex number
1535 but rather of MODE_FLOAT or MODE_INT. However, the caller
1536 must make sure that the MODE of the non-complex operand matches
1537 the SUBMODE of the complex operand. */
1539 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1541 rtx real0
= 0, imag0
= 0;
1542 rtx real1
= 0, imag1
= 0;
1543 rtx realr
, imagr
, res
;
1548 /* Find the correct mode for the real and imaginary parts */
1549 enum machine_mode submode
= GET_MODE_INNER(mode
);
1551 if (submode
== BLKmode
)
1555 target
= gen_reg_rtx (mode
);
1559 realr
= gen_realpart (submode
, target
);
1560 imagr
= gen_imagpart (submode
, target
);
1562 if (GET_MODE (op0
) == mode
)
1564 real0
= gen_realpart (submode
, op0
);
1565 imag0
= gen_imagpart (submode
, op0
);
1570 if (GET_MODE (op1
) == mode
)
1572 real1
= gen_realpart (submode
, op1
);
1573 imag1
= gen_imagpart (submode
, op1
);
1578 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0 || imag1
!= 0))
1581 switch (binoptab
->code
)
1584 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1586 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1587 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1588 realr
, unsignedp
, methods
);
1592 else if (res
!= realr
)
1593 emit_move_insn (realr
, res
);
1595 if (imag0
!= 0 && imag1
!= 0)
1596 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1597 imagr
, unsignedp
, methods
);
1598 else if (imag0
!= 0)
1600 else if (binoptab
->code
== MINUS
)
1601 res
= expand_unop (submode
,
1602 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1603 imag1
, imagr
, unsignedp
);
1609 else if (res
!= imagr
)
1610 emit_move_insn (imagr
, res
);
1616 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1618 if (imag0
!= 0 && imag1
!= 0)
1622 /* Don't fetch these from memory more than once. */
1623 real0
= force_reg (submode
, real0
);
1624 real1
= force_reg (submode
, real1
);
1625 imag0
= force_reg (submode
, imag0
);
1626 imag1
= force_reg (submode
, imag1
);
1628 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1629 unsignedp
, methods
);
1631 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1632 unsignedp
, methods
);
1634 if (temp1
== 0 || temp2
== 0)
1639 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1640 temp1
, temp2
, realr
, unsignedp
, methods
));
1644 else if (res
!= realr
)
1645 emit_move_insn (realr
, res
);
1647 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1648 NULL_RTX
, unsignedp
, methods
);
1650 /* Avoid expanding redundant multiplication for the common
1651 case of squaring a complex number. */
1652 if (rtx_equal_p (real0
, real1
) && rtx_equal_p (imag0
, imag1
))
1655 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1656 NULL_RTX
, unsignedp
, methods
);
1658 if (temp1
== 0 || temp2
== 0)
1663 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1664 temp1
, temp2
, imagr
, unsignedp
, methods
));
1668 else if (res
!= imagr
)
1669 emit_move_insn (imagr
, res
);
1675 /* Don't fetch these from memory more than once. */
1676 real0
= force_reg (submode
, real0
);
1677 real1
= force_reg (submode
, real1
);
1679 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1680 realr
, unsignedp
, methods
);
1683 else if (res
!= realr
)
1684 emit_move_insn (realr
, res
);
1687 res
= expand_binop (submode
, binoptab
,
1688 real1
, imag0
, imagr
, unsignedp
, methods
);
1690 res
= expand_binop (submode
, binoptab
,
1691 real0
, imag1
, imagr
, unsignedp
, methods
);
1695 else if (res
!= imagr
)
1696 emit_move_insn (imagr
, res
);
1703 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1707 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1709 /* Don't fetch these from memory more than once. */
1710 real1
= force_reg (submode
, real1
);
1712 /* Simply divide the real and imaginary parts by `c' */
1713 if (class == MODE_COMPLEX_FLOAT
)
1714 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1715 realr
, unsignedp
, methods
);
1717 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1718 real0
, real1
, realr
, unsignedp
);
1722 else if (res
!= realr
)
1723 emit_move_insn (realr
, res
);
1725 if (class == MODE_COMPLEX_FLOAT
)
1726 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1727 imagr
, unsignedp
, methods
);
1729 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1730 imag0
, real1
, imagr
, unsignedp
);
1734 else if (res
!= imagr
)
1735 emit_move_insn (imagr
, res
);
1741 switch (flag_complex_divide_method
)
1744 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1745 realr
, imagr
, submode
,
1751 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1752 realr
, imagr
, submode
,
1772 if (binoptab
->code
!= UNKNOWN
)
1774 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1775 copy_rtx (op0
), copy_rtx (op1
));
1779 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1785 /* It can't be open-coded in this mode.
1786 Use a library call if one is available and caller says that's ok. */
1788 if (binoptab
->handlers
[(int) mode
].libfunc
1789 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1793 enum machine_mode op1_mode
= mode
;
1800 op1_mode
= word_mode
;
1801 /* Specify unsigned here,
1802 since negative shift counts are meaningless. */
1803 op1x
= convert_to_mode (word_mode
, op1
, 1);
1806 if (GET_MODE (op0
) != VOIDmode
1807 && GET_MODE (op0
) != mode
)
1808 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1810 /* Pass 1 for NO_QUEUE so we don't lose any increments
1811 if the libcall is cse'd or moved. */
1812 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1813 NULL_RTX
, LCT_CONST
, mode
, 2,
1814 op0
, mode
, op1x
, op1_mode
);
1816 insns
= get_insns ();
1819 target
= gen_reg_rtx (mode
);
1820 emit_libcall_block (insns
, target
, value
,
1821 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1826 delete_insns_since (last
);
1828 /* It can't be done in this mode. Can we do it in a wider mode? */
1830 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1831 || methods
== OPTAB_MUST_WIDEN
))
1833 /* Caller says, don't even try. */
1834 delete_insns_since (entry_last
);
1838 /* Compute the value of METHODS to pass to recursive calls.
1839 Don't allow widening to be tried recursively. */
1841 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1843 /* Look for a wider mode of the same class for which it appears we can do
1846 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1848 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1849 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1851 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1852 != CODE_FOR_nothing
)
1853 || (methods
== OPTAB_LIB
1854 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1856 rtx xop0
= op0
, xop1
= op1
;
1859 /* For certain integer operations, we need not actually extend
1860 the narrow operands, as long as we will truncate
1861 the results to the same narrowness. */
1863 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1864 || binoptab
== xor_optab
1865 || binoptab
== add_optab
|| binoptab
== sub_optab
1866 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1867 && class == MODE_INT
)
1870 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1871 unsignedp
, no_extend
);
1873 /* The second operand of a shift must always be extended. */
1874 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1875 no_extend
&& binoptab
!= ashl_optab
);
1877 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1878 unsignedp
, methods
);
1881 if (class != MODE_INT
)
1884 target
= gen_reg_rtx (mode
);
1885 convert_move (target
, temp
, 0);
1889 return gen_lowpart (mode
, temp
);
1892 delete_insns_since (last
);
1897 delete_insns_since (entry_last
);
1901 /* Like expand_binop, but for open-coding vectors binops. */
1904 expand_vector_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
1905 enum machine_mode mode
;
1910 enum optab_methods methods
;
1912 enum machine_mode submode
, tmode
;
1913 int size
, elts
, subsize
, subbitsize
, i
;
1914 rtx t
, a
, b
, res
, seq
;
1915 enum mode_class
class;
1917 class = GET_MODE_CLASS (mode
);
1919 size
= GET_MODE_SIZE (mode
);
1920 submode
= GET_MODE_INNER (mode
);
1922 /* Search for the widest vector mode with the same inner mode that is
1923 still narrower than MODE and that allows to open-code this operator.
1924 Note, if we find such a mode and the handler later decides it can't
1925 do the expansion, we'll be called recursively with the narrower mode. */
1926 for (tmode
= GET_CLASS_NARROWEST_MODE (class);
1927 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
1928 tmode
= GET_MODE_WIDER_MODE (tmode
))
1930 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
1931 && binoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
1935 switch (binoptab
->code
)
1940 tmode
= int_mode_for_mode (mode
);
1941 if (tmode
!= BLKmode
)
1947 subsize
= GET_MODE_SIZE (submode
);
1948 subbitsize
= GET_MODE_BITSIZE (submode
);
1949 elts
= size
/ subsize
;
1951 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1952 but that we operate on more than one element at a time. */
1953 if (subsize
== GET_MODE_UNIT_SIZE (mode
) && methods
== OPTAB_DIRECT
)
1958 /* Errors can leave us with a const0_rtx as operand. */
1959 if (GET_MODE (op0
) != mode
)
1960 op0
= copy_to_mode_reg (mode
, op0
);
1961 if (GET_MODE (op1
) != mode
)
1962 op1
= copy_to_mode_reg (mode
, op1
);
1965 target
= gen_reg_rtx (mode
);
1967 for (i
= 0; i
< elts
; ++i
)
1969 /* If this is part of a register, and not the first item in the
1970 word, we can't store using a SUBREG - that would clobber
1972 And storing with a SUBREG is only possible for the least
1973 significant part, hence we can't do it for big endian
1974 (unless we want to permute the evaluation order. */
1975 if (GET_CODE (target
) == REG
1976 && (BYTES_BIG_ENDIAN
1977 ? subsize
< UNITS_PER_WORD
1978 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
1981 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
1982 if (CONSTANT_P (op0
))
1983 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
1985 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
1986 NULL_RTX
, submode
, submode
, size
);
1987 if (CONSTANT_P (op1
))
1988 b
= simplify_gen_subreg (submode
, op1
, mode
, i
* subsize
);
1990 b
= extract_bit_field (op1
, subbitsize
, i
* subbitsize
, unsignedp
,
1991 NULL_RTX
, submode
, submode
, size
);
1993 if (binoptab
->code
== DIV
)
1995 if (class == MODE_VECTOR_FLOAT
)
1996 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1997 unsignedp
, methods
);
1999 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
2000 a
, b
, t
, unsignedp
);
2003 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
2004 unsignedp
, methods
);
2010 emit_move_insn (t
, res
);
2012 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2028 /* Like expand_unop but for open-coding vector unops. */
2031 expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2032 enum machine_mode mode
;
2038 enum machine_mode submode
, tmode
;
2039 int size
, elts
, subsize
, subbitsize
, i
;
2042 size
= GET_MODE_SIZE (mode
);
2043 submode
= GET_MODE_INNER (mode
);
2045 /* Search for the widest vector mode with the same inner mode that is
2046 still narrower than MODE and that allows to open-code this operator.
2047 Note, if we find such a mode and the handler later decides it can't
2048 do the expansion, we'll be called recursively with the narrower mode. */
2049 for (tmode
= GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode
));
2050 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
2051 tmode
= GET_MODE_WIDER_MODE (tmode
))
2053 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
2054 && unoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
2057 /* If there is no negate operation, try doing a subtract from zero. */
2058 if (unoptab
== neg_optab
&& GET_MODE_CLASS (submode
) == MODE_INT
2059 /* Avoid infinite recursion when an
2060 error has left us with the wrong mode. */
2061 && GET_MODE (op0
) == mode
)
2064 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2065 target
, unsignedp
, OPTAB_DIRECT
);
2070 if (unoptab
== one_cmpl_optab
)
2072 tmode
= int_mode_for_mode (mode
);
2073 if (tmode
!= BLKmode
)
2077 subsize
= GET_MODE_SIZE (submode
);
2078 subbitsize
= GET_MODE_BITSIZE (submode
);
2079 elts
= size
/ subsize
;
2081 /* Errors can leave us with a const0_rtx as operand. */
2082 if (GET_MODE (op0
) != mode
)
2083 op0
= copy_to_mode_reg (mode
, op0
);
2086 target
= gen_reg_rtx (mode
);
2090 for (i
= 0; i
< elts
; ++i
)
2092 /* If this is part of a register, and not the first item in the
2093 word, we can't store using a SUBREG - that would clobber
2095 And storing with a SUBREG is only possible for the least
2096 significant part, hence we can't do it for big endian
2097 (unless we want to permute the evaluation order. */
2098 if (GET_CODE (target
) == REG
2099 && (BYTES_BIG_ENDIAN
2100 ? subsize
< UNITS_PER_WORD
2101 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
2104 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
2105 if (CONSTANT_P (op0
))
2106 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
2108 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
2109 t
, submode
, submode
, size
);
2111 res
= expand_unop (submode
, unoptab
, a
, t
, unsignedp
);
2114 emit_move_insn (t
, res
);
2116 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2127 /* Expand a binary operator which has both signed and unsigned forms.
2128 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2131 If we widen unsigned operands, we may use a signed wider operation instead
2132 of an unsigned wider operation, since the result would be the same. */
2135 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
2136 enum machine_mode mode
;
2137 optab uoptab
, soptab
;
2138 rtx op0
, op1
, target
;
2140 enum optab_methods methods
;
2143 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2144 struct optab wide_soptab
;
2146 /* Do it without widening, if possible. */
2147 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2148 unsignedp
, OPTAB_DIRECT
);
2149 if (temp
|| methods
== OPTAB_DIRECT
)
2152 /* Try widening to a signed int. Make a fake signed optab that
2153 hides any signed insn for direct use. */
2154 wide_soptab
= *soptab
;
2155 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2156 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2158 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2159 unsignedp
, OPTAB_WIDEN
);
2161 /* For unsigned operands, try widening to an unsigned int. */
2162 if (temp
== 0 && unsignedp
)
2163 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2164 unsignedp
, OPTAB_WIDEN
);
2165 if (temp
|| methods
== OPTAB_WIDEN
)
2168 /* Use the right width lib call if that exists. */
2169 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2170 if (temp
|| methods
== OPTAB_LIB
)
2173 /* Must widen and use a lib call, use either signed or unsigned. */
2174 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2175 unsignedp
, methods
);
2179 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2180 unsignedp
, methods
);
2184 /* Generate code to perform an operation specified by BINOPTAB
2185 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2186 We assume that the order of the operands for the instruction
2187 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2188 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2190 Either TARG0 or TARG1 may be zero, but what that means is that
2191 the result is not actually wanted. We will generate it into
2192 a dummy pseudo-reg and discard it. They may not both be zero.
2194 Returns 1 if this operation can be performed; 0 if not. */
2197 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
2203 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2204 enum mode_class
class;
2205 enum machine_mode wider_mode
;
2206 rtx entry_last
= get_last_insn ();
2209 class = GET_MODE_CLASS (mode
);
2211 op0
= protect_from_queue (op0
, 0);
2212 op1
= protect_from_queue (op1
, 0);
2216 op0
= force_not_mem (op0
);
2217 op1
= force_not_mem (op1
);
2220 /* If we are inside an appropriately-short loop and one operand is an
2221 expensive constant, force it into a register. */
2222 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2223 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2224 op0
= force_reg (mode
, op0
);
2226 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2227 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2228 op1
= force_reg (mode
, op1
);
2231 targ0
= protect_from_queue (targ0
, 1);
2233 targ0
= gen_reg_rtx (mode
);
2235 targ1
= protect_from_queue (targ1
, 1);
2237 targ1
= gen_reg_rtx (mode
);
2239 /* Record where to go back to if we fail. */
2240 last
= get_last_insn ();
2242 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2244 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2245 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2246 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2248 rtx xop0
= op0
, xop1
= op1
;
2250 /* In case the insn wants input operands in modes different from
2251 those of the actual operands, convert the operands. It would
2252 seem that we don't need to convert CONST_INTs, but we do, so
2253 that they're properly zero-extended, sign-extended or truncated
2256 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2257 xop0
= convert_modes (mode0
,
2258 GET_MODE (op0
) != VOIDmode
2263 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2264 xop1
= convert_modes (mode1
,
2265 GET_MODE (op1
) != VOIDmode
2270 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2271 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2272 xop0
= copy_to_mode_reg (mode0
, xop0
);
2274 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2275 xop1
= copy_to_mode_reg (mode1
, xop1
);
2277 /* We could handle this, but we should always be called with a pseudo
2278 for our targets and all insns should take them as outputs. */
2279 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2280 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
2283 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2290 delete_insns_since (last
);
2293 /* It can't be done in this mode. Can we do it in a wider mode? */
2295 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2297 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2298 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2300 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2301 != CODE_FOR_nothing
)
2303 rtx t0
= gen_reg_rtx (wider_mode
);
2304 rtx t1
= gen_reg_rtx (wider_mode
);
2305 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2306 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2308 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2311 convert_move (targ0
, t0
, unsignedp
);
2312 convert_move (targ1
, t1
, unsignedp
);
2316 delete_insns_since (last
);
2321 delete_insns_since (entry_last
);
2325 /* Wrapper around expand_unop which takes an rtx code to specify
2326 the operation to perform, not an optab pointer. All other
2327 arguments are the same. */
2329 expand_simple_unop (mode
, code
, op0
, target
, unsignedp
)
2330 enum machine_mode mode
;
2336 optab unop
= code_to_optab
[(int) code
];
2340 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2346 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2348 widen_clz (mode
, op0
, target
)
2349 enum machine_mode mode
;
2353 enum mode_class
class = GET_MODE_CLASS (mode
);
2354 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2356 enum machine_mode wider_mode
;
2357 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2358 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2360 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2361 != CODE_FOR_nothing
)
2363 rtx xop0
, temp
, last
;
2365 last
= get_last_insn ();
2368 target
= gen_reg_rtx (mode
);
2369 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2370 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2372 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2373 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2374 - GET_MODE_BITSIZE (mode
)),
2375 target
, true, OPTAB_DIRECT
);
2377 delete_insns_since (last
);
2386 /* Try calculating (parity x) as (and (popcount x) 1), where
2387 popcount can also be done in a wider mode. */
2389 expand_parity (mode
, op0
, target
)
2390 enum machine_mode mode
;
2394 enum mode_class
class = GET_MODE_CLASS (mode
);
2395 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2397 enum machine_mode wider_mode
;
2398 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2399 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2401 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2402 != CODE_FOR_nothing
)
2404 rtx xop0
, temp
, last
;
2406 last
= get_last_insn ();
2409 target
= gen_reg_rtx (mode
);
2410 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2411 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2414 temp
= expand_binop (wider_mode
, and_optab
, temp
, GEN_INT (1),
2415 target
, true, OPTAB_DIRECT
);
2417 delete_insns_since (last
);
2426 /* Generate code to perform an operation specified by UNOPTAB
2427 on operand OP0, with result having machine-mode MODE.
2429 UNSIGNEDP is for the case where we have to widen the operands
2430 to perform the operation. It says to use zero-extension.
2432 If TARGET is nonzero, the value
2433 is generated there, if it is convenient to do so.
2434 In all cases an rtx is returned for the locus of the value;
2435 this may or may not be TARGET. */
2438 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2439 enum machine_mode mode
;
2445 enum mode_class
class;
2446 enum machine_mode wider_mode
;
2448 rtx last
= get_last_insn ();
2451 class = GET_MODE_CLASS (mode
);
2453 op0
= protect_from_queue (op0
, 0);
2457 op0
= force_not_mem (op0
);
2461 target
= protect_from_queue (target
, 1);
2463 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2465 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2466 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2472 temp
= gen_reg_rtx (mode
);
2474 if (GET_MODE (xop0
) != VOIDmode
2475 && GET_MODE (xop0
) != mode0
)
2476 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2478 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2480 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2481 xop0
= copy_to_mode_reg (mode0
, xop0
);
2483 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2484 temp
= gen_reg_rtx (mode
);
2486 pat
= GEN_FCN (icode
) (temp
, xop0
);
2489 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2490 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2492 delete_insns_since (last
);
2493 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2501 delete_insns_since (last
);
2504 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2506 /* Widening clz needs special treatment. */
2507 if (unoptab
== clz_optab
)
2509 temp
= widen_clz (mode
, op0
, target
);
2516 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2517 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2518 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2520 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2524 /* For certain operations, we need not actually extend
2525 the narrow operand, as long as we will truncate the
2526 results to the same narrowness. */
2528 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2529 (unoptab
== neg_optab
2530 || unoptab
== one_cmpl_optab
)
2531 && class == MODE_INT
);
2533 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2538 if (class != MODE_INT
)
2541 target
= gen_reg_rtx (mode
);
2542 convert_move (target
, temp
, 0);
2546 return gen_lowpart (mode
, temp
);
2549 delete_insns_since (last
);
2553 /* These can be done a word at a time. */
2554 if (unoptab
== one_cmpl_optab
2555 && class == MODE_INT
2556 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2557 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2562 if (target
== 0 || target
== op0
)
2563 target
= gen_reg_rtx (mode
);
2567 /* Do the actual arithmetic. */
2568 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2570 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2571 rtx x
= expand_unop (word_mode
, unoptab
,
2572 operand_subword_force (op0
, i
, mode
),
2573 target_piece
, unsignedp
);
2575 if (target_piece
!= x
)
2576 emit_move_insn (target_piece
, x
);
2579 insns
= get_insns ();
2582 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2583 gen_rtx_fmt_e (unoptab
->code
, mode
,
2588 /* Open-code the complex negation operation. */
2589 else if (unoptab
->code
== NEG
2590 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2596 /* Find the correct mode for the real and imaginary parts */
2597 enum machine_mode submode
= GET_MODE_INNER (mode
);
2599 if (submode
== BLKmode
)
2603 target
= gen_reg_rtx (mode
);
2607 target_piece
= gen_imagpart (submode
, target
);
2608 x
= expand_unop (submode
, unoptab
,
2609 gen_imagpart (submode
, op0
),
2610 target_piece
, unsignedp
);
2611 if (target_piece
!= x
)
2612 emit_move_insn (target_piece
, x
);
2614 target_piece
= gen_realpart (submode
, target
);
2615 x
= expand_unop (submode
, unoptab
,
2616 gen_realpart (submode
, op0
),
2617 target_piece
, unsignedp
);
2618 if (target_piece
!= x
)
2619 emit_move_insn (target_piece
, x
);
2624 emit_no_conflict_block (seq
, target
, op0
, 0,
2625 gen_rtx_fmt_e (unoptab
->code
, mode
,
2630 /* Try negating floating point values by flipping the sign bit. */
2631 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2632 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2634 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2635 enum machine_mode imode
= int_mode_for_mode (mode
);
2636 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2638 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2640 HOST_WIDE_INT hi
, lo
;
2641 rtx last
= get_last_insn ();
2643 /* Handle targets with different FP word orders. */
2644 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2646 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2647 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2648 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2651 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2654 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2658 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2661 temp
= expand_binop (imode
, xor_optab
,
2662 gen_lowpart (imode
, op0
),
2663 immed_double_const (lo
, hi
, imode
),
2664 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2666 return gen_lowpart (mode
, temp
);
2667 delete_insns_since (last
);
2671 /* Try calculating parity (x) as popcount (x) % 2. */
2672 if (unoptab
== parity_optab
)
2674 temp
= expand_parity (mode
, op0
, target
);
2680 /* Now try a library call in this mode. */
2681 if (unoptab
->handlers
[(int) mode
].libfunc
)
2685 enum machine_mode outmode
= mode
;
2687 /* All of these functions return small values. Thus we choose to
2688 have them return something that isn't a double-word. */
2689 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2690 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2691 outmode
= TYPE_MODE (integer_type_node
);
2695 /* Pass 1 for NO_QUEUE so we don't lose any increments
2696 if the libcall is cse'd or moved. */
2697 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2698 NULL_RTX
, LCT_CONST
, outmode
,
2700 insns
= get_insns ();
2703 target
= gen_reg_rtx (outmode
);
2704 emit_libcall_block (insns
, target
, value
,
2705 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2710 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2711 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2713 /* It can't be done in this mode. Can we do it in a wider mode? */
2715 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2717 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2718 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2720 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2721 != CODE_FOR_nothing
)
2722 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2726 /* For certain operations, we need not actually extend
2727 the narrow operand, as long as we will truncate the
2728 results to the same narrowness. */
2730 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2731 (unoptab
== neg_optab
2732 || unoptab
== one_cmpl_optab
)
2733 && class == MODE_INT
);
2735 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2738 /* If we are generating clz using wider mode, adjust the
2740 if (unoptab
== clz_optab
&& temp
!= 0)
2741 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2742 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2743 - GET_MODE_BITSIZE (mode
)),
2744 target
, true, OPTAB_DIRECT
);
2748 if (class != MODE_INT
)
2751 target
= gen_reg_rtx (mode
);
2752 convert_move (target
, temp
, 0);
2756 return gen_lowpart (mode
, temp
);
2759 delete_insns_since (last
);
2764 /* If there is no negate operation, try doing a subtract from zero.
2765 The US Software GOFAST library needs this. */
2766 if (unoptab
->code
== NEG
)
2769 temp
= expand_binop (mode
,
2770 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2771 CONST0_RTX (mode
), op0
,
2772 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2780 /* Emit code to compute the absolute value of OP0, with result to
2781 TARGET if convenient. (TARGET may be 0.) The return value says
2782 where the result actually is to be found.
2784 MODE is the mode of the operand; the mode of the result is
2785 different but can be deduced from MODE.
2790 expand_abs_nojump (mode
, op0
, target
, result_unsignedp
)
2791 enum machine_mode mode
;
2794 int result_unsignedp
;
2799 result_unsignedp
= 1;
2801 /* First try to do it with a special abs instruction. */
2802 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2807 /* For floating point modes, try clearing the sign bit. */
2808 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2809 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2811 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2812 enum machine_mode imode
= int_mode_for_mode (mode
);
2813 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2815 if (imode
!= BLKmode
&& bitpos
>= 0)
2817 HOST_WIDE_INT hi
, lo
;
2818 rtx last
= get_last_insn ();
2820 /* Handle targets with different FP word orders. */
2821 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2823 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2824 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2825 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2828 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2831 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2835 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2838 temp
= expand_binop (imode
, and_optab
,
2839 gen_lowpart (imode
, op0
),
2840 immed_double_const (~lo
, ~hi
, imode
),
2841 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2843 return gen_lowpart (mode
, temp
);
2844 delete_insns_since (last
);
2848 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2849 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2851 rtx last
= get_last_insn ();
2853 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2855 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2861 delete_insns_since (last
);
2864 /* If this machine has expensive jumps, we can do integer absolute
2865 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2866 where W is the width of MODE. */
2868 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2870 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2871 size_int (GET_MODE_BITSIZE (mode
) - 1),
2874 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2877 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2878 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2888 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2889 enum machine_mode mode
;
2892 int result_unsignedp
;
2897 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2901 /* If that does not win, use conditional jump and negate. */
2903 /* It is safe to use the target if it is the same
2904 as the source if this is also a pseudo register */
2905 if (op0
== target
&& GET_CODE (op0
) == REG
2906 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2909 op1
= gen_label_rtx ();
2910 if (target
== 0 || ! safe
2911 || GET_MODE (target
) != mode
2912 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2913 || (GET_CODE (target
) == REG
2914 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2915 target
= gen_reg_rtx (mode
);
2917 emit_move_insn (target
, op0
);
2920 /* If this mode is an integer too wide to compare properly,
2921 compare word by word. Rely on CSE to optimize constant cases. */
2922 if (GET_MODE_CLASS (mode
) == MODE_INT
2923 && ! can_compare_p (GE
, mode
, ccp_jump
))
2924 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2927 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2928 NULL_RTX
, NULL_RTX
, op1
);
2930 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2933 emit_move_insn (target
, op0
);
2939 /* Emit code to compute the absolute value of OP0, with result to
2940 TARGET if convenient. (TARGET may be 0.) The return value says
2941 where the result actually is to be found.
2943 MODE is the mode of the operand; the mode of the result is
2944 different but can be deduced from MODE.
2946 UNSIGNEDP is relevant for complex integer modes. */
2949 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2950 enum machine_mode mode
;
2955 enum mode_class
class = GET_MODE_CLASS (mode
);
2956 enum machine_mode wider_mode
;
2958 rtx entry_last
= get_last_insn ();
2961 optab this_abs_optab
;
2963 /* Find the correct mode for the real and imaginary parts. */
2964 enum machine_mode submode
= GET_MODE_INNER (mode
);
2966 if (submode
== BLKmode
)
2969 op0
= protect_from_queue (op0
, 0);
2973 op0
= force_not_mem (op0
);
2976 last
= get_last_insn ();
2979 target
= protect_from_queue (target
, 1);
2981 this_abs_optab
= ! unsignedp
&& flag_trapv
2982 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2983 ? absv_optab
: abs_optab
;
2985 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2987 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2988 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2994 temp
= gen_reg_rtx (submode
);
2996 if (GET_MODE (xop0
) != VOIDmode
2997 && GET_MODE (xop0
) != mode0
)
2998 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
3000 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3002 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
3003 xop0
= copy_to_mode_reg (mode0
, xop0
);
3005 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
3006 temp
= gen_reg_rtx (submode
);
3008 pat
= GEN_FCN (icode
) (temp
, xop0
);
3011 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3012 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
3015 delete_insns_since (last
);
3016 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
3025 delete_insns_since (last
);
3028 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3030 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3031 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3033 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3034 != CODE_FOR_nothing
)
3038 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3039 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3043 if (class != MODE_COMPLEX_INT
)
3046 target
= gen_reg_rtx (submode
);
3047 convert_move (target
, temp
, 0);
3051 return gen_lowpart (submode
, temp
);
3054 delete_insns_since (last
);
3058 /* Open-code the complex absolute-value operation
3059 if we can open-code sqrt. Otherwise it's not worth while. */
3060 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
3063 rtx real
, imag
, total
;
3065 real
= gen_realpart (submode
, op0
);
3066 imag
= gen_imagpart (submode
, op0
);
3068 /* Square both parts. */
3069 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
3070 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
3072 /* Sum the parts. */
3073 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
3074 0, OPTAB_LIB_WIDEN
);
3076 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3077 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
3079 delete_insns_since (last
);
3084 /* Now try a library call in this mode. */
3085 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
3092 /* Pass 1 for NO_QUEUE so we don't lose any increments
3093 if the libcall is cse'd or moved. */
3094 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
3095 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
3096 insns
= get_insns ();
3099 target
= gen_reg_rtx (submode
);
3100 emit_libcall_block (insns
, target
, value
,
3101 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
3106 /* It can't be done in this mode. Can we do it in a wider mode? */
3108 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3109 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3111 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3112 != CODE_FOR_nothing
)
3113 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
3117 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3119 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3123 if (class != MODE_COMPLEX_INT
)
3126 target
= gen_reg_rtx (submode
);
3127 convert_move (target
, temp
, 0);
3131 return gen_lowpart (submode
, temp
);
3134 delete_insns_since (last
);
3138 delete_insns_since (entry_last
);
3142 /* Generate an instruction whose insn-code is INSN_CODE,
3143 with two operands: an output TARGET and an input OP0.
3144 TARGET *must* be nonzero, and the output is always stored there.
3145 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3146 the value that is stored into TARGET. */
3149 emit_unop_insn (icode
, target
, op0
, code
)
3156 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3159 temp
= target
= protect_from_queue (target
, 1);
3161 op0
= protect_from_queue (op0
, 0);
3163 /* Sign and zero extension from memory is often done specially on
3164 RISC machines, so forcing into a register here can pessimize
3166 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
3167 op0
= force_not_mem (op0
);
3169 /* Now, if insn does not accept our operands, put them into pseudos. */
3171 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
3172 op0
= copy_to_mode_reg (mode0
, op0
);
3174 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
3175 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
3176 temp
= gen_reg_rtx (GET_MODE (temp
));
3178 pat
= GEN_FCN (icode
) (temp
, op0
);
3180 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3181 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3186 emit_move_insn (target
, temp
);
3189 /* Emit code to perform a series of operations on a multi-word quantity, one
3192 Such a block is preceded by a CLOBBER of the output, consists of multiple
3193 insns, each setting one word of the output, and followed by a SET copying
3194 the output to itself.
3196 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3197 note indicating that it doesn't conflict with the (also multi-word)
3198 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3201 INSNS is a block of code generated to perform the operation, not including
3202 the CLOBBER and final copy. All insns that compute intermediate values
3203 are first emitted, followed by the block as described above.
3205 TARGET, OP0, and OP1 are the output and inputs of the operations,
3206 respectively. OP1 may be zero for a unary operation.
3208 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3211 If TARGET is not a register, INSNS is simply emitted with no special
3212 processing. Likewise if anything in INSNS is not an INSN or if
3213 there is a libcall block inside INSNS.
3215 The final insn emitted is returned. */
3218 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
3224 rtx prev
, next
, first
, last
, insn
;
3226 if (GET_CODE (target
) != REG
|| reload_in_progress
)
3227 return emit_insn (insns
);
3229 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3230 if (GET_CODE (insn
) != INSN
3231 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3232 return emit_insn (insns
);
3234 /* First emit all insns that do not store into words of the output and remove
3235 these from the list. */
3236 for (insn
= insns
; insn
; insn
= next
)
3241 next
= NEXT_INSN (insn
);
3243 /* Some ports (cris) create an libcall regions at their own. We must
3244 avoid any potential nesting of LIBCALLs. */
3245 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3246 remove_note (insn
, note
);
3247 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3248 remove_note (insn
, note
);
3250 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3251 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3252 set
= PATTERN (insn
);
3253 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3255 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3256 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3258 set
= XVECEXP (PATTERN (insn
), 0, i
);
3266 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3268 if (PREV_INSN (insn
))
3269 NEXT_INSN (PREV_INSN (insn
)) = next
;
3274 PREV_INSN (next
) = PREV_INSN (insn
);
3280 prev
= get_last_insn ();
3282 /* Now write the CLOBBER of the output, followed by the setting of each
3283 of the words, followed by the final copy. */
3284 if (target
!= op0
&& target
!= op1
)
3285 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3287 for (insn
= insns
; insn
; insn
= next
)
3289 next
= NEXT_INSN (insn
);
3292 if (op1
&& GET_CODE (op1
) == REG
)
3293 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3296 if (op0
&& GET_CODE (op0
) == REG
)
3297 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3301 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3302 != CODE_FOR_nothing
)
3304 last
= emit_move_insn (target
, target
);
3306 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3310 last
= get_last_insn ();
3312 /* Remove any existing REG_EQUAL note from "last", or else it will
3313 be mistaken for a note referring to the full contents of the
3314 alleged libcall value when found together with the REG_RETVAL
3315 note added below. An existing note can come from an insn
3316 expansion at "last". */
3317 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3321 first
= get_insns ();
3323 first
= NEXT_INSN (prev
);
3325 /* Encapsulate the block so it gets manipulated as a unit. */
3326 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3328 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3333 /* Emit code to make a call to a constant function or a library call.
3335 INSNS is a list containing all insns emitted in the call.
3336 These insns leave the result in RESULT. Our block is to copy RESULT
3337 to TARGET, which is logically equivalent to EQUIV.
3339 We first emit any insns that set a pseudo on the assumption that these are
3340 loading constants into registers; doing so allows them to be safely cse'ed
3341 between blocks. Then we emit all the other insns in the block, followed by
3342 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3343 note with an operand of EQUIV.
3345 Moving assignments to pseudos outside of the block is done to improve
3346 the generated code, but is not required to generate correct code,
3347 hence being unable to move an assignment is not grounds for not making
3348 a libcall block. There are two reasons why it is safe to leave these
3349 insns inside the block: First, we know that these pseudos cannot be
3350 used in generated RTL outside the block since they are created for
3351 temporary purposes within the block. Second, CSE will not record the
3352 values of anything set inside a libcall block, so we know they must
3353 be dead at the end of the block.
3355 Except for the first group of insns (the ones setting pseudos), the
3356 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3359 emit_libcall_block (insns
, target
, result
, equiv
)
3365 rtx final_dest
= target
;
3366 rtx prev
, next
, first
, last
, insn
;
3368 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3369 into a MEM later. Protect the libcall block from this change. */
3370 if (! REG_P (target
) || REG_USERVAR_P (target
))
3371 target
= gen_reg_rtx (GET_MODE (target
));
3373 /* If we're using non-call exceptions, a libcall corresponding to an
3374 operation that may trap may also trap. */
3375 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3377 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3378 if (GET_CODE (insn
) == CALL_INSN
)
3380 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3382 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3383 remove_note (insn
, note
);
3387 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3388 reg note to indicate that this call cannot throw or execute a nonlocal
3389 goto (unless there is already a REG_EH_REGION note, in which case
3391 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3392 if (GET_CODE (insn
) == CALL_INSN
)
3394 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3397 XEXP (note
, 0) = GEN_INT (-1);
3399 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
3403 /* First emit all insns that set pseudos. Remove them from the list as
3404 we go. Avoid insns that set pseudos which were referenced in previous
3405 insns. These can be generated by move_by_pieces, for example,
3406 to update an address. Similarly, avoid insns that reference things
3407 set in previous insns. */
3409 for (insn
= insns
; insn
; insn
= next
)
3411 rtx set
= single_set (insn
);
3414 /* Some ports (cris) create an libcall regions at their own. We must
3415 avoid any potential nesting of LIBCALLs. */
3416 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3417 remove_note (insn
, note
);
3418 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3419 remove_note (insn
, note
);
3421 next
= NEXT_INSN (insn
);
3423 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3424 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3426 || ((! INSN_P(insns
)
3427 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3428 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3429 && ! modified_in_p (SET_SRC (set
), insns
)
3430 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3432 if (PREV_INSN (insn
))
3433 NEXT_INSN (PREV_INSN (insn
)) = next
;
3438 PREV_INSN (next
) = PREV_INSN (insn
);
3444 prev
= get_last_insn ();
3446 /* Write the remaining insns followed by the final copy. */
3448 for (insn
= insns
; insn
; insn
= next
)
3450 next
= NEXT_INSN (insn
);
3455 last
= emit_move_insn (target
, result
);
3456 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3457 != CODE_FOR_nothing
)
3458 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3461 /* Remove any existing REG_EQUAL note from "last", or else it will
3462 be mistaken for a note referring to the full contents of the
3463 libcall value when found together with the REG_RETVAL note added
3464 below. An existing note can come from an insn expansion at
3466 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3469 if (final_dest
!= target
)
3470 emit_move_insn (final_dest
, target
);
3473 first
= get_insns ();
3475 first
= NEXT_INSN (prev
);
3477 /* Encapsulate the block so it gets manipulated as a unit. */
3478 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3480 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3481 when the encapsulated region would not be in one basic block,
3482 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3484 bool attach_libcall_retval_notes
= true;
3485 next
= NEXT_INSN (last
);
3486 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3487 if (control_flow_insn_p (insn
))
3489 attach_libcall_retval_notes
= false;
3493 if (attach_libcall_retval_notes
)
3495 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3497 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3503 /* Generate code to store zero in X. */
3509 emit_move_insn (x
, const0_rtx
);
3512 /* Generate code to store 1 in X
3513 assuming it contains zero beforehand. */
3516 emit_0_to_1_insn (x
)
3519 emit_move_insn (x
, const1_rtx
);
3522 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3523 PURPOSE describes how this comparison will be used. CODE is the rtx
3524 comparison code we will be using.
3526 ??? Actually, CODE is slightly weaker than that. A target is still
3527 required to implement all of the normal bcc operations, but not
3528 required to implement all (or any) of the unordered bcc operations. */
3531 can_compare_p (code
, mode
, purpose
)
3533 enum machine_mode mode
;
3534 enum can_compare_purpose purpose
;
3538 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3540 if (purpose
== ccp_jump
)
3541 return bcc_gen_fctn
[(int) code
] != NULL
;
3542 else if (purpose
== ccp_store_flag
)
3543 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3545 /* There's only one cmov entry point, and it's allowed to fail. */
3548 if (purpose
== ccp_jump
3549 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3551 if (purpose
== ccp_cmov
3552 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3554 if (purpose
== ccp_store_flag
3555 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3558 mode
= GET_MODE_WIDER_MODE (mode
);
3560 while (mode
!= VOIDmode
);
3565 /* This function is called when we are going to emit a compare instruction that
3566 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3568 *PMODE is the mode of the inputs (in case they are const_int).
3569 *PUNSIGNEDP nonzero says that the operands are unsigned;
3570 this matters if they need to be widened.
3572 If they have mode BLKmode, then SIZE specifies the size of both operands.
3574 This function performs all the setup necessary so that the caller only has
3575 to emit a single comparison insn. This setup can involve doing a BLKmode
3576 comparison or emitting a library call to perform the comparison if no insn
3577 is available to handle it.
3578 The values which are passed in through pointers can be modified; the caller
3579 should perform the comparison on the modified values. */
3582 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3584 enum rtx_code
*pcomparison
;
3586 enum machine_mode
*pmode
;
3588 enum can_compare_purpose purpose
;
3590 enum machine_mode mode
= *pmode
;
3591 rtx x
= *px
, y
= *py
;
3592 int unsignedp
= *punsignedp
;
3593 enum mode_class
class;
3595 class = GET_MODE_CLASS (mode
);
3597 /* They could both be VOIDmode if both args are immediate constants,
3598 but we should fold that at an earlier stage.
3599 With no special code here, this will call abort,
3600 reminding the programmer to implement such folding. */
3602 if (mode
!= BLKmode
&& flag_force_mem
)
3604 /* Load duplicate non-volatile operands once. */
3605 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3607 x
= force_not_mem (x
);
3612 x
= force_not_mem (x
);
3613 y
= force_not_mem (y
);
3617 /* If we are inside an appropriately-short loop and one operand is an
3618 expensive constant, force it into a register. */
3619 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3620 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3621 x
= force_reg (mode
, x
);
3623 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3624 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3625 y
= force_reg (mode
, y
);
3628 /* Abort if we have a non-canonical comparison. The RTL documentation
3629 states that canonical comparisons are required only for targets which
3631 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3635 /* Don't let both operands fail to indicate the mode. */
3636 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3637 x
= force_reg (mode
, x
);
3639 /* Handle all BLKmode compares. */
3641 if (mode
== BLKmode
)
3644 enum machine_mode result_mode
;
3645 rtx opalign ATTRIBUTE_UNUSED
3646 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3649 x
= protect_from_queue (x
, 0);
3650 y
= protect_from_queue (y
, 0);
3654 #ifdef HAVE_cmpstrqi
3656 && GET_CODE (size
) == CONST_INT
3657 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3659 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3660 result
= gen_reg_rtx (result_mode
);
3661 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3665 #ifdef HAVE_cmpstrhi
3667 && GET_CODE (size
) == CONST_INT
3668 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3670 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3671 result
= gen_reg_rtx (result_mode
);
3672 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3676 #ifdef HAVE_cmpstrsi
3679 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3680 result
= gen_reg_rtx (result_mode
);
3681 size
= protect_from_queue (size
, 0);
3682 emit_insn (gen_cmpstrsi (result
, x
, y
,
3683 convert_to_mode (SImode
, size
, 1),
3689 #ifdef TARGET_MEM_FUNCTIONS
3690 result
= emit_library_call_value (memcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3691 TYPE_MODE (integer_type_node
), 3,
3692 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3693 convert_to_mode (TYPE_MODE (sizetype
), size
,
3694 TREE_UNSIGNED (sizetype
)),
3695 TYPE_MODE (sizetype
));
3697 result
= emit_library_call_value (bcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3698 TYPE_MODE (integer_type_node
), 3,
3699 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3700 convert_to_mode (TYPE_MODE (integer_type_node
),
3702 TREE_UNSIGNED (integer_type_node
)),
3703 TYPE_MODE (integer_type_node
));
3706 result_mode
= TYPE_MODE (integer_type_node
);
3710 *pmode
= result_mode
;
3716 if (can_compare_p (*pcomparison
, mode
, purpose
))
3719 /* Handle a lib call just for the mode we are using. */
3721 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3723 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3726 /* If we want unsigned, and this mode has a distinct unsigned
3727 comparison routine, use that. */
3728 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3729 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3731 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3732 word_mode
, 2, x
, mode
, y
, mode
);
3734 /* Integer comparison returns a result that must be compared against 1,
3735 so that even if we do an unsigned compare afterward,
3736 there is still a value that can represent the result "less than". */
3743 if (class == MODE_FLOAT
)
3744 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3750 /* Before emitting an insn with code ICODE, make sure that X, which is going
3751 to be used for operand OPNUM of the insn, is converted from mode MODE to
3752 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3753 that it is accepted by the operand predicate. Return the new value. */
3756 prepare_operand (icode
, x
, opnum
, mode
, wider_mode
, unsignedp
)
3760 enum machine_mode mode
, wider_mode
;
3763 x
= protect_from_queue (x
, 0);
3765 if (mode
!= wider_mode
)
3766 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3768 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3769 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3770 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3774 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3775 we can do the comparison.
3776 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3777 be NULL_RTX which indicates that only a comparison is to be generated. */
3780 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3782 enum machine_mode mode
;
3783 enum rtx_code comparison
;
3787 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3788 enum mode_class
class = GET_MODE_CLASS (mode
);
3789 enum machine_mode wider_mode
= mode
;
3791 /* Try combined insns first. */
3794 enum insn_code icode
;
3795 PUT_MODE (test
, wider_mode
);
3799 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3801 if (icode
!= CODE_FOR_nothing
3802 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3804 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3805 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3806 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3811 /* Handle some compares against zero. */
3812 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3813 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3815 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3816 emit_insn (GEN_FCN (icode
) (x
));
3818 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3822 /* Handle compares for which there is a directly suitable insn. */
3824 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3825 if (icode
!= CODE_FOR_nothing
)
3827 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3828 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3829 emit_insn (GEN_FCN (icode
) (x
, y
));
3831 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3835 if (class != MODE_INT
&& class != MODE_FLOAT
3836 && class != MODE_COMPLEX_FLOAT
)
3839 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3841 while (wider_mode
!= VOIDmode
);
3846 /* Generate code to compare X with Y so that the condition codes are
3847 set and to jump to LABEL if the condition is true. If X is a
3848 constant and Y is not a constant, then the comparison is swapped to
3849 ensure that the comparison RTL has the canonical form.
3851 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3852 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3853 the proper branch condition code.
3855 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3857 MODE is the mode of the inputs (in case they are const_int).
3859 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3860 be passed unchanged to emit_cmp_insn, then potentially converted into an
3861 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3864 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3866 enum rtx_code comparison
;
3868 enum machine_mode mode
;
3872 rtx op0
= x
, op1
= y
;
3874 /* Swap operands and condition to ensure canonical RTL. */
3875 if (swap_commutative_operands_p (x
, y
))
3877 /* If we're not emitting a branch, this means some caller
3883 comparison
= swap_condition (comparison
);
3887 /* If OP0 is still a constant, then both X and Y must be constants. Force
3888 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3890 if (CONSTANT_P (op0
))
3891 op0
= force_reg (mode
, op0
);
3896 comparison
= unsigned_condition (comparison
);
3898 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3900 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3903 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3906 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3908 enum rtx_code comparison
;
3910 enum machine_mode mode
;
3913 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3916 /* Emit a library call comparison between floating point X and Y.
3917 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3920 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3922 enum rtx_code
*pcomparison
;
3923 enum machine_mode
*pmode
;
3926 enum rtx_code comparison
= *pcomparison
;
3928 rtx x
= *px
= protect_from_queue (*px
, 0);
3929 rtx y
= *py
= protect_from_queue (*py
, 0);
3930 enum machine_mode mode
= GET_MODE (x
);
3938 libfunc
= eqhf2_libfunc
;
3942 libfunc
= nehf2_libfunc
;
3946 libfunc
= gthf2_libfunc
;
3947 if (libfunc
== NULL_RTX
)
3949 tmp
= x
; x
= y
; y
= tmp
;
3951 libfunc
= lthf2_libfunc
;
3956 libfunc
= gehf2_libfunc
;
3957 if (libfunc
== NULL_RTX
)
3959 tmp
= x
; x
= y
; y
= tmp
;
3961 libfunc
= lehf2_libfunc
;
3966 libfunc
= lthf2_libfunc
;
3967 if (libfunc
== NULL_RTX
)
3969 tmp
= x
; x
= y
; y
= tmp
;
3971 libfunc
= gthf2_libfunc
;
3976 libfunc
= lehf2_libfunc
;
3977 if (libfunc
== NULL_RTX
)
3979 tmp
= x
; x
= y
; y
= tmp
;
3981 libfunc
= gehf2_libfunc
;
3986 libfunc
= unordhf2_libfunc
;
3992 else if (mode
== SFmode
)
3996 libfunc
= eqsf2_libfunc
;
4000 libfunc
= nesf2_libfunc
;
4004 libfunc
= gtsf2_libfunc
;
4005 if (libfunc
== NULL_RTX
)
4007 tmp
= x
; x
= y
; y
= tmp
;
4009 libfunc
= ltsf2_libfunc
;
4014 libfunc
= gesf2_libfunc
;
4015 if (libfunc
== NULL_RTX
)
4017 tmp
= x
; x
= y
; y
= tmp
;
4019 libfunc
= lesf2_libfunc
;
4024 libfunc
= ltsf2_libfunc
;
4025 if (libfunc
== NULL_RTX
)
4027 tmp
= x
; x
= y
; y
= tmp
;
4029 libfunc
= gtsf2_libfunc
;
4034 libfunc
= lesf2_libfunc
;
4035 if (libfunc
== NULL_RTX
)
4037 tmp
= x
; x
= y
; y
= tmp
;
4039 libfunc
= gesf2_libfunc
;
4044 libfunc
= unordsf2_libfunc
;
4050 else if (mode
== DFmode
)
4054 libfunc
= eqdf2_libfunc
;
4058 libfunc
= nedf2_libfunc
;
4062 libfunc
= gtdf2_libfunc
;
4063 if (libfunc
== NULL_RTX
)
4065 tmp
= x
; x
= y
; y
= tmp
;
4067 libfunc
= ltdf2_libfunc
;
4072 libfunc
= gedf2_libfunc
;
4073 if (libfunc
== NULL_RTX
)
4075 tmp
= x
; x
= y
; y
= tmp
;
4077 libfunc
= ledf2_libfunc
;
4082 libfunc
= ltdf2_libfunc
;
4083 if (libfunc
== NULL_RTX
)
4085 tmp
= x
; x
= y
; y
= tmp
;
4087 libfunc
= gtdf2_libfunc
;
4092 libfunc
= ledf2_libfunc
;
4093 if (libfunc
== NULL_RTX
)
4095 tmp
= x
; x
= y
; y
= tmp
;
4097 libfunc
= gedf2_libfunc
;
4102 libfunc
= unorddf2_libfunc
;
4108 else if (mode
== XFmode
)
4112 libfunc
= eqxf2_libfunc
;
4116 libfunc
= nexf2_libfunc
;
4120 libfunc
= gtxf2_libfunc
;
4121 if (libfunc
== NULL_RTX
)
4123 tmp
= x
; x
= y
; y
= tmp
;
4125 libfunc
= ltxf2_libfunc
;
4130 libfunc
= gexf2_libfunc
;
4131 if (libfunc
== NULL_RTX
)
4133 tmp
= x
; x
= y
; y
= tmp
;
4135 libfunc
= lexf2_libfunc
;
4140 libfunc
= ltxf2_libfunc
;
4141 if (libfunc
== NULL_RTX
)
4143 tmp
= x
; x
= y
; y
= tmp
;
4145 libfunc
= gtxf2_libfunc
;
4150 libfunc
= lexf2_libfunc
;
4151 if (libfunc
== NULL_RTX
)
4153 tmp
= x
; x
= y
; y
= tmp
;
4155 libfunc
= gexf2_libfunc
;
4160 libfunc
= unordxf2_libfunc
;
4166 else if (mode
== TFmode
)
4170 libfunc
= eqtf2_libfunc
;
4174 libfunc
= netf2_libfunc
;
4178 libfunc
= gttf2_libfunc
;
4179 if (libfunc
== NULL_RTX
)
4181 tmp
= x
; x
= y
; y
= tmp
;
4183 libfunc
= lttf2_libfunc
;
4188 libfunc
= getf2_libfunc
;
4189 if (libfunc
== NULL_RTX
)
4191 tmp
= x
; x
= y
; y
= tmp
;
4193 libfunc
= letf2_libfunc
;
4198 libfunc
= lttf2_libfunc
;
4199 if (libfunc
== NULL_RTX
)
4201 tmp
= x
; x
= y
; y
= tmp
;
4203 libfunc
= gttf2_libfunc
;
4208 libfunc
= letf2_libfunc
;
4209 if (libfunc
== NULL_RTX
)
4211 tmp
= x
; x
= y
; y
= tmp
;
4213 libfunc
= getf2_libfunc
;
4218 libfunc
= unordtf2_libfunc
;
4226 enum machine_mode wider_mode
;
4228 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
4229 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
4231 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
4232 != CODE_FOR_nothing
)
4233 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
4235 x
= protect_from_queue (x
, 0);
4236 y
= protect_from_queue (y
, 0);
4237 *px
= convert_to_mode (wider_mode
, x
, 0);
4238 *py
= convert_to_mode (wider_mode
, y
, 0);
4239 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
4249 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
4250 word_mode
, 2, x
, mode
, y
, mode
);
4254 if (comparison
== UNORDERED
)
4256 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
4257 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4263 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4266 emit_indirect_jump (loc
)
4269 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
4271 loc
= copy_to_mode_reg (Pmode
, loc
);
4273 emit_jump_insn (gen_indirect_jump (loc
));
4277 #ifdef HAVE_conditional_move
4279 /* Emit a conditional move instruction if the machine supports one for that
4280 condition and machine mode.
4282 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4283 the mode to use should they be constants. If it is VOIDmode, they cannot
4286 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4287 should be stored there. MODE is the mode to use should they be constants.
4288 If it is VOIDmode, they cannot both be constants.
4290 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4291 is not supported. */
4294 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
4299 enum machine_mode cmode
;
4301 enum machine_mode mode
;
4304 rtx tem
, subtarget
, comparison
, insn
;
4305 enum insn_code icode
;
4306 enum rtx_code reversed
;
4308 /* If one operand is constant, make it the second one. Only do this
4309 if the other operand is not constant as well. */
4311 if (swap_commutative_operands_p (op0
, op1
))
4316 code
= swap_condition (code
);
4319 /* get_condition will prefer to generate LT and GT even if the old
4320 comparison was against zero, so undo that canonicalization here since
4321 comparisons against zero are cheaper. */
4322 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4323 code
= LE
, op1
= const0_rtx
;
4324 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4325 code
= GE
, op1
= const0_rtx
;
4327 if (cmode
== VOIDmode
)
4328 cmode
= GET_MODE (op0
);
4330 if (swap_commutative_operands_p (op2
, op3
)
4331 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4340 if (mode
== VOIDmode
)
4341 mode
= GET_MODE (op2
);
4343 icode
= movcc_gen_code
[mode
];
4345 if (icode
== CODE_FOR_nothing
)
4350 op2
= force_not_mem (op2
);
4351 op3
= force_not_mem (op3
);
4355 target
= protect_from_queue (target
, 1);
4357 target
= gen_reg_rtx (mode
);
4363 op2
= protect_from_queue (op2
, 0);
4364 op3
= protect_from_queue (op3
, 0);
4366 /* If the insn doesn't accept these operands, put them in pseudos. */
4368 if (! (*insn_data
[icode
].operand
[0].predicate
)
4369 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4370 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4372 if (! (*insn_data
[icode
].operand
[2].predicate
)
4373 (op2
, insn_data
[icode
].operand
[2].mode
))
4374 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4376 if (! (*insn_data
[icode
].operand
[3].predicate
)
4377 (op3
, insn_data
[icode
].operand
[3].mode
))
4378 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4380 /* Everything should now be in the suitable form, so emit the compare insn
4381 and then the conditional move. */
4384 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4386 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4387 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4388 return NULL and let the caller figure out how best to deal with this
4390 if (GET_CODE (comparison
) != code
)
4393 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4395 /* If that failed, then give up. */
4401 if (subtarget
!= target
)
4402 convert_move (target
, subtarget
, 0);
4407 /* Return nonzero if a conditional move of mode MODE is supported.
4409 This function is for combine so it can tell whether an insn that looks
4410 like a conditional move is actually supported by the hardware. If we
4411 guess wrong we lose a bit on optimization, but that's it. */
4412 /* ??? sparc64 supports conditionally moving integers values based on fp
4413 comparisons, and vice versa. How do we handle them? */
4416 can_conditionally_move_p (mode
)
4417 enum machine_mode mode
;
4419 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4425 #endif /* HAVE_conditional_move */
4427 /* Emit a conditional addition instruction if the machine supports one for that
4428 condition and machine mode.
4430 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4431 the mode to use should they be constants. If it is VOIDmode, they cannot
4434 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4435 should be stored there. MODE is the mode to use should they be constants.
4436 If it is VOIDmode, they cannot both be constants.
4438 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4439 is not supported. */
4442 emit_conditional_add (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
4447 enum machine_mode cmode
;
4449 enum machine_mode mode
;
4452 rtx tem
, subtarget
, comparison
, insn
;
4453 enum insn_code icode
;
4454 enum rtx_code reversed
;
4456 /* If one operand is constant, make it the second one. Only do this
4457 if the other operand is not constant as well. */
4459 if (swap_commutative_operands_p (op0
, op1
))
4464 code
= swap_condition (code
);
4467 /* get_condition will prefer to generate LT and GT even if the old
4468 comparison was against zero, so undo that canonicalization here since
4469 comparisons against zero are cheaper. */
4470 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4471 code
= LE
, op1
= const0_rtx
;
4472 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4473 code
= GE
, op1
= const0_rtx
;
4475 if (cmode
== VOIDmode
)
4476 cmode
= GET_MODE (op0
);
4478 if (swap_commutative_operands_p (op2
, op3
)
4479 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4488 if (mode
== VOIDmode
)
4489 mode
= GET_MODE (op2
);
4491 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4493 if (icode
== CODE_FOR_nothing
)
4498 op2
= force_not_mem (op2
);
4499 op3
= force_not_mem (op3
);
4503 target
= protect_from_queue (target
, 1);
4505 target
= gen_reg_rtx (mode
);
4511 op2
= protect_from_queue (op2
, 0);
4512 op3
= protect_from_queue (op3
, 0);
4514 /* If the insn doesn't accept these operands, put them in pseudos. */
4516 if (! (*insn_data
[icode
].operand
[0].predicate
)
4517 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4518 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4520 if (! (*insn_data
[icode
].operand
[2].predicate
)
4521 (op2
, insn_data
[icode
].operand
[2].mode
))
4522 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4524 if (! (*insn_data
[icode
].operand
[3].predicate
)
4525 (op3
, insn_data
[icode
].operand
[3].mode
))
4526 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4528 /* Everything should now be in the suitable form, so emit the compare insn
4529 and then the conditional move. */
4532 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4534 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4535 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4536 return NULL and let the caller figure out how best to deal with this
4538 if (GET_CODE (comparison
) != code
)
4541 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4543 /* If that failed, then give up. */
4549 if (subtarget
!= target
)
4550 convert_move (target
, subtarget
, 0);
4555 /* These functions attempt to generate an insn body, rather than
4556 emitting the insn, but if the gen function already emits them, we
4557 make no attempt to turn them back into naked patterns.
4559 They do not protect from queued increments,
4560 because they may be used 1) in protect_from_queue itself
4561 and 2) in other passes where there is no queue. */
4563 /* Generate and return an insn body to add Y to X. */
4566 gen_add2_insn (x
, y
)
4569 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4571 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4572 (x
, insn_data
[icode
].operand
[0].mode
))
4573 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4574 (x
, insn_data
[icode
].operand
[1].mode
))
4575 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4576 (y
, insn_data
[icode
].operand
[2].mode
)))
4579 return (GEN_FCN (icode
) (x
, x
, y
));
4582 /* Generate and return an insn body to add r1 and c,
4583 storing the result in r0. */
4585 gen_add3_insn (r0
, r1
, c
)
4588 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4590 if (icode
== CODE_FOR_nothing
4591 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4592 (r0
, insn_data
[icode
].operand
[0].mode
))
4593 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4594 (r1
, insn_data
[icode
].operand
[1].mode
))
4595 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4596 (c
, insn_data
[icode
].operand
[2].mode
)))
4599 return (GEN_FCN (icode
) (r0
, r1
, c
));
4603 have_add2_insn (x
, y
)
4608 if (GET_MODE (x
) == VOIDmode
)
4611 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4613 if (icode
== CODE_FOR_nothing
)
4616 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4617 (x
, insn_data
[icode
].operand
[0].mode
))
4618 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4619 (x
, insn_data
[icode
].operand
[1].mode
))
4620 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4621 (y
, insn_data
[icode
].operand
[2].mode
)))
4627 /* Generate and return an insn body to subtract Y from X. */
4630 gen_sub2_insn (x
, y
)
4633 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4635 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4636 (x
, insn_data
[icode
].operand
[0].mode
))
4637 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4638 (x
, insn_data
[icode
].operand
[1].mode
))
4639 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4640 (y
, insn_data
[icode
].operand
[2].mode
)))
4643 return (GEN_FCN (icode
) (x
, x
, y
));
4646 /* Generate and return an insn body to subtract r1 and c,
4647 storing the result in r0. */
4649 gen_sub3_insn (r0
, r1
, c
)
4652 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4654 if (icode
== CODE_FOR_nothing
4655 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4656 (r0
, insn_data
[icode
].operand
[0].mode
))
4657 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4658 (r1
, insn_data
[icode
].operand
[1].mode
))
4659 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4660 (c
, insn_data
[icode
].operand
[2].mode
)))
4663 return (GEN_FCN (icode
) (r0
, r1
, c
));
4667 have_sub2_insn (x
, y
)
4672 if (GET_MODE (x
) == VOIDmode
)
4675 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4677 if (icode
== CODE_FOR_nothing
)
4680 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4681 (x
, insn_data
[icode
].operand
[0].mode
))
4682 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4683 (x
, insn_data
[icode
].operand
[1].mode
))
4684 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4685 (y
, insn_data
[icode
].operand
[2].mode
)))
4691 /* Generate the body of an instruction to copy Y into X.
4692 It may be a list of insns, if one insn isn't enough. */
4695 gen_move_insn (x
, y
)
4701 emit_move_insn_1 (x
, y
);
4707 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4708 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4709 no such operation exists, CODE_FOR_nothing will be returned. */
4712 can_extend_p (to_mode
, from_mode
, unsignedp
)
4713 enum machine_mode to_mode
, from_mode
;
4716 #ifdef HAVE_ptr_extend
4718 return CODE_FOR_ptr_extend
;
4721 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
!= 0];
4724 /* Generate the body of an insn to extend Y (with mode MFROM)
4725 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4728 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
4730 enum machine_mode mto
, mfrom
;
4733 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
!= 0]) (x
, y
));
4736 /* can_fix_p and can_float_p say whether the target machine
4737 can directly convert a given fixed point type to
4738 a given floating point type, or vice versa.
4739 The returned value is the CODE_FOR_... value to use,
4740 or CODE_FOR_nothing if these modes cannot be directly converted.
4742 *TRUNCP_PTR is set to 1 if it is necessary to output
4743 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4745 static enum insn_code
4746 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
4747 enum machine_mode fltmode
, fixmode
;
4752 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0]
4753 != CODE_FOR_nothing
)
4754 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4756 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
4759 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4761 return CODE_FOR_nothing
;
4764 static enum insn_code
4765 can_float_p (fltmode
, fixmode
, unsignedp
)
4766 enum machine_mode fixmode
, fltmode
;
4769 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4772 /* Generate code to convert FROM to floating point
4773 and store in TO. FROM must be fixed point and not VOIDmode.
4774 UNSIGNEDP nonzero means regard FROM as unsigned.
4775 Normally this is done by correcting the final value
4776 if it is negative. */
4779 expand_float (to
, from
, unsignedp
)
4783 enum insn_code icode
;
4785 enum machine_mode fmode
, imode
;
4787 /* Crash now, because we won't be able to decide which mode to use. */
4788 if (GET_MODE (from
) == VOIDmode
)
4791 /* Look for an insn to do the conversion. Do it in the specified
4792 modes if possible; otherwise convert either input, output or both to
4793 wider mode. If the integer mode is wider than the mode of FROM,
4794 we can do the conversion signed even if the input is unsigned. */
4796 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4797 fmode
= GET_MODE_WIDER_MODE (fmode
))
4798 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4799 imode
= GET_MODE_WIDER_MODE (imode
))
4801 int doing_unsigned
= unsignedp
;
4803 if (fmode
!= GET_MODE (to
)
4804 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4807 icode
= can_float_p (fmode
, imode
, unsignedp
);
4808 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4809 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4811 if (icode
!= CODE_FOR_nothing
)
4813 to
= protect_from_queue (to
, 1);
4814 from
= protect_from_queue (from
, 0);
4816 if (imode
!= GET_MODE (from
))
4817 from
= convert_to_mode (imode
, from
, unsignedp
);
4819 if (fmode
!= GET_MODE (to
))
4820 target
= gen_reg_rtx (fmode
);
4822 emit_unop_insn (icode
, target
, from
,
4823 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4826 convert_move (to
, target
, 0);
4831 /* Unsigned integer, and no way to convert directly.
4832 Convert as signed, then conditionally adjust the result. */
4835 rtx label
= gen_label_rtx ();
4837 REAL_VALUE_TYPE offset
;
4841 to
= protect_from_queue (to
, 1);
4842 from
= protect_from_queue (from
, 0);
4845 from
= force_not_mem (from
);
4847 /* Look for a usable floating mode FMODE wider than the source and at
4848 least as wide as the target. Using FMODE will avoid rounding woes
4849 with unsigned values greater than the signed maximum value. */
4851 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4852 fmode
= GET_MODE_WIDER_MODE (fmode
))
4853 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4854 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4857 if (fmode
== VOIDmode
)
4859 /* There is no such mode. Pretend the target is wide enough. */
4860 fmode
= GET_MODE (to
);
4862 /* Avoid double-rounding when TO is narrower than FROM. */
4863 if ((significand_size (fmode
) + 1)
4864 < GET_MODE_BITSIZE (GET_MODE (from
)))
4867 rtx neglabel
= gen_label_rtx ();
4869 /* Don't use TARGET if it isn't a register, is a hard register,
4870 or is the wrong mode. */
4871 if (GET_CODE (target
) != REG
4872 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4873 || GET_MODE (target
) != fmode
)
4874 target
= gen_reg_rtx (fmode
);
4876 imode
= GET_MODE (from
);
4877 do_pending_stack_adjust ();
4879 /* Test whether the sign bit is set. */
4880 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4883 /* The sign bit is not set. Convert as signed. */
4884 expand_float (target
, from
, 0);
4885 emit_jump_insn (gen_jump (label
));
4888 /* The sign bit is set.
4889 Convert to a usable (positive signed) value by shifting right
4890 one bit, while remembering if a nonzero bit was shifted
4891 out; i.e., compute (from & 1) | (from >> 1). */
4893 emit_label (neglabel
);
4894 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4895 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4896 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4898 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4900 expand_float (target
, temp
, 0);
4902 /* Multiply by 2 to undo the shift above. */
4903 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4904 target
, 0, OPTAB_LIB_WIDEN
);
4906 emit_move_insn (target
, temp
);
4908 do_pending_stack_adjust ();
4914 /* If we are about to do some arithmetic to correct for an
4915 unsigned operand, do it in a pseudo-register. */
4917 if (GET_MODE (to
) != fmode
4918 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4919 target
= gen_reg_rtx (fmode
);
4921 /* Convert as signed integer to floating. */
4922 expand_float (target
, from
, 0);
4924 /* If FROM is negative (and therefore TO is negative),
4925 correct its value by 2**bitwidth. */
4927 do_pending_stack_adjust ();
4928 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4932 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4933 temp
= expand_binop (fmode
, add_optab
, target
,
4934 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4935 target
, 0, OPTAB_LIB_WIDEN
);
4937 emit_move_insn (target
, temp
);
4939 do_pending_stack_adjust ();
4944 /* No hardware instruction available; call a library routine to convert from
4945 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4951 to
= protect_from_queue (to
, 1);
4952 from
= protect_from_queue (from
, 0);
4954 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4955 from
= convert_to_mode (SImode
, from
, unsignedp
);
4958 from
= force_not_mem (from
);
4960 if (GET_MODE (to
) == SFmode
)
4962 if (GET_MODE (from
) == SImode
)
4963 libfcn
= floatsisf_libfunc
;
4964 else if (GET_MODE (from
) == DImode
)
4965 libfcn
= floatdisf_libfunc
;
4966 else if (GET_MODE (from
) == TImode
)
4967 libfcn
= floattisf_libfunc
;
4971 else if (GET_MODE (to
) == DFmode
)
4973 if (GET_MODE (from
) == SImode
)
4974 libfcn
= floatsidf_libfunc
;
4975 else if (GET_MODE (from
) == DImode
)
4976 libfcn
= floatdidf_libfunc
;
4977 else if (GET_MODE (from
) == TImode
)
4978 libfcn
= floattidf_libfunc
;
4982 else if (GET_MODE (to
) == XFmode
)
4984 if (GET_MODE (from
) == SImode
)
4985 libfcn
= floatsixf_libfunc
;
4986 else if (GET_MODE (from
) == DImode
)
4987 libfcn
= floatdixf_libfunc
;
4988 else if (GET_MODE (from
) == TImode
)
4989 libfcn
= floattixf_libfunc
;
4993 else if (GET_MODE (to
) == TFmode
)
4995 if (GET_MODE (from
) == SImode
)
4996 libfcn
= floatsitf_libfunc
;
4997 else if (GET_MODE (from
) == DImode
)
4998 libfcn
= floatditf_libfunc
;
4999 else if (GET_MODE (from
) == TImode
)
5000 libfcn
= floattitf_libfunc
;
5009 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
5010 GET_MODE (to
), 1, from
,
5012 insns
= get_insns ();
5015 emit_libcall_block (insns
, target
, value
,
5016 gen_rtx_FLOAT (GET_MODE (to
), from
));
5021 /* Copy result to requested destination
5022 if we have been computing in a temp location. */
5026 if (GET_MODE (target
) == GET_MODE (to
))
5027 emit_move_insn (to
, target
);
5029 convert_move (to
, target
, 0);
5033 /* expand_fix: generate code to convert FROM to fixed point
5034 and store in TO. FROM must be floating point. */
5040 rtx temp
= gen_reg_rtx (GET_MODE (x
));
5041 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
5045 expand_fix (to
, from
, unsignedp
)
5049 enum insn_code icode
;
5051 enum machine_mode fmode
, imode
;
5055 /* We first try to find a pair of modes, one real and one integer, at
5056 least as wide as FROM and TO, respectively, in which we can open-code
5057 this conversion. If the integer mode is wider than the mode of TO,
5058 we can do the conversion either signed or unsigned. */
5060 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5061 fmode
= GET_MODE_WIDER_MODE (fmode
))
5062 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5063 imode
= GET_MODE_WIDER_MODE (imode
))
5065 int doing_unsigned
= unsignedp
;
5067 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5068 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5069 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5071 if (icode
!= CODE_FOR_nothing
)
5073 to
= protect_from_queue (to
, 1);
5074 from
= protect_from_queue (from
, 0);
5076 if (fmode
!= GET_MODE (from
))
5077 from
= convert_to_mode (fmode
, from
, 0);
5080 from
= ftruncify (from
);
5082 if (imode
!= GET_MODE (to
))
5083 target
= gen_reg_rtx (imode
);
5085 emit_unop_insn (icode
, target
, from
,
5086 doing_unsigned
? UNSIGNED_FIX
: FIX
);
5088 convert_move (to
, target
, unsignedp
);
5093 /* For an unsigned conversion, there is one more way to do it.
5094 If we have a signed conversion, we generate code that compares
5095 the real value to the largest representable positive number. If if
5096 is smaller, the conversion is done normally. Otherwise, subtract
5097 one plus the highest signed number, convert, and add it back.
5099 We only need to check all real modes, since we know we didn't find
5100 anything with a wider integer mode.
5102 This code used to extend FP value into mode wider than the destination.
5103 This is not needed. Consider, for instance conversion from SFmode
5106 The hot path trought the code is dealing with inputs smaller than 2^63
5107 and doing just the conversion, so there is no bits to lose.
5109 In the other path we know the value is positive in the range 2^63..2^64-1
5110 inclusive. (as for other imput overflow happens and result is undefined)
5111 So we know that the most important bit set in mantisa corresponds to
5112 2^63. The subtraction of 2^63 should not generate any rounding as it
5113 simply clears out that bit. The rest is trivial. */
5115 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5116 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5117 fmode
= GET_MODE_WIDER_MODE (fmode
))
5118 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
5122 REAL_VALUE_TYPE offset
;
5123 rtx limit
, lab1
, lab2
, insn
;
5125 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
5126 real_2expN (&offset
, bitsize
- 1);
5127 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5128 lab1
= gen_label_rtx ();
5129 lab2
= gen_label_rtx ();
5132 to
= protect_from_queue (to
, 1);
5133 from
= protect_from_queue (from
, 0);
5136 from
= force_not_mem (from
);
5138 if (fmode
!= GET_MODE (from
))
5139 from
= convert_to_mode (fmode
, from
, 0);
5141 /* See if we need to do the subtraction. */
5142 do_pending_stack_adjust ();
5143 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5146 /* If not, do the signed "fix" and branch around fixup code. */
5147 expand_fix (to
, from
, 0);
5148 emit_jump_insn (gen_jump (lab2
));
5151 /* Otherwise, subtract 2**(N-1), convert to signed number,
5152 then add 2**(N-1). Do the addition using XOR since this
5153 will often generate better code. */
5155 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5156 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5157 expand_fix (to
, target
, 0);
5158 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5160 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5162 to
, 1, OPTAB_LIB_WIDEN
);
5165 emit_move_insn (to
, target
);
5169 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
5170 != CODE_FOR_nothing
)
5172 /* Make a place for a REG_NOTE and add it. */
5173 insn
= emit_move_insn (to
, to
);
5174 set_unique_reg_note (insn
,
5176 gen_rtx_fmt_e (UNSIGNED_FIX
,
5184 /* We can't do it with an insn, so use a library call. But first ensure
5185 that the mode of TO is at least as wide as SImode, since those are the
5186 only library calls we know about. */
5188 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5190 target
= gen_reg_rtx (SImode
);
5192 expand_fix (target
, from
, unsignedp
);
5194 else if (GET_MODE (from
) == SFmode
)
5196 if (GET_MODE (to
) == SImode
)
5197 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
5198 else if (GET_MODE (to
) == DImode
)
5199 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
5200 else if (GET_MODE (to
) == TImode
)
5201 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
5205 else if (GET_MODE (from
) == DFmode
)
5207 if (GET_MODE (to
) == SImode
)
5208 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
5209 else if (GET_MODE (to
) == DImode
)
5210 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
5211 else if (GET_MODE (to
) == TImode
)
5212 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
5216 else if (GET_MODE (from
) == XFmode
)
5218 if (GET_MODE (to
) == SImode
)
5219 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
5220 else if (GET_MODE (to
) == DImode
)
5221 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
5222 else if (GET_MODE (to
) == TImode
)
5223 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
5227 else if (GET_MODE (from
) == TFmode
)
5229 if (GET_MODE (to
) == SImode
)
5230 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
5231 else if (GET_MODE (to
) == DImode
)
5232 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
5233 else if (GET_MODE (to
) == TImode
)
5234 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
5246 to
= protect_from_queue (to
, 1);
5247 from
= protect_from_queue (from
, 0);
5250 from
= force_not_mem (from
);
5254 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
5255 GET_MODE (to
), 1, from
,
5257 insns
= get_insns ();
5260 emit_libcall_block (insns
, target
, value
,
5261 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5262 GET_MODE (to
), from
));
5267 if (GET_MODE (to
) == GET_MODE (target
))
5268 emit_move_insn (to
, target
);
5270 convert_move (to
, target
, 0);
5274 /* Report whether we have an instruction to perform the operation
5275 specified by CODE on operands of mode MODE. */
5277 have_insn_for (code
, mode
)
5279 enum machine_mode mode
;
5281 return (code_to_optab
[(int) code
] != 0
5282 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
5283 != CODE_FOR_nothing
));
5286 /* Create a blank optab. */
5291 optab op
= (optab
) ggc_alloc (sizeof (struct optab
));
5292 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5294 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
5295 op
->handlers
[i
].libfunc
= 0;
5301 /* Same, but fill in its code as CODE, and write it into the
5302 code_to_optab table. */
5307 optab op
= new_optab ();
5309 code_to_optab
[(int) code
] = op
;
5313 /* Same, but fill in its code as CODE, and do _not_ write it into
5314 the code_to_optab table. */
5319 optab op
= new_optab ();
5324 /* Initialize the libfunc fields of an entire group of entries in some
5325 optab. Each entry is set equal to a string consisting of a leading
5326 pair of underscores followed by a generic operation name followed by
5327 a mode name (downshifted to lower case) followed by a single character
5328 representing the number of operands for the given operation (which is
5329 usually one of the characters '2', '3', or '4').
5331 OPTABLE is the table in which libfunc fields are to be initialized.
5332 FIRST_MODE is the first machine mode index in the given optab to
5334 LAST_MODE is the last machine mode index in the given optab to
5336 OPNAME is the generic (string) name of the operation.
5337 SUFFIX is the character which specifies the number of operands for
5338 the given generic operation.
5342 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
5350 unsigned opname_len
= strlen (opname
);
5352 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
5353 mode
= (enum machine_mode
) ((int) mode
+ 1))
5355 const char *mname
= GET_MODE_NAME (mode
);
5356 unsigned mname_len
= strlen (mname
);
5357 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
5364 for (q
= opname
; *q
; )
5366 for (q
= mname
; *q
; q
++)
5367 *p
++ = TOLOWER (*q
);
5371 optable
->handlers
[(int) mode
].libfunc
5372 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
5376 /* Initialize the libfunc fields of an entire group of entries in some
5377 optab which correspond to all integer mode operations. The parameters
5378 have the same meaning as similarly named ones for the `init_libfuncs'
5379 routine. (See above). */
5382 init_integral_libfuncs (optable
, opname
, suffix
)
5387 int maxsize
= 2*BITS_PER_WORD
;
5388 if (maxsize
< LONG_LONG_TYPE_SIZE
)
5389 maxsize
= LONG_LONG_TYPE_SIZE
;
5390 init_libfuncs (optable
, word_mode
,
5391 mode_for_size (maxsize
, MODE_INT
, 0),
5395 /* Initialize the libfunc fields of an entire group of entries in some
5396 optab which correspond to all real mode operations. The parameters
5397 have the same meaning as similarly named ones for the `init_libfuncs'
5398 routine. (See above). */
5401 init_floating_libfuncs (optable
, opname
, suffix
)
5406 enum machine_mode fmode
, dmode
, lmode
;
5408 fmode
= float_type_node
? TYPE_MODE (float_type_node
) : VOIDmode
;
5409 dmode
= double_type_node
? TYPE_MODE (double_type_node
) : VOIDmode
;
5410 lmode
= long_double_type_node
? TYPE_MODE (long_double_type_node
) : VOIDmode
;
5412 if (fmode
!= VOIDmode
)
5413 init_libfuncs (optable
, fmode
, fmode
, opname
, suffix
);
5414 if (dmode
!= fmode
&& dmode
!= VOIDmode
)
5415 init_libfuncs (optable
, dmode
, dmode
, opname
, suffix
);
5416 if (lmode
!= dmode
&& lmode
!= VOIDmode
)
5417 init_libfuncs (optable
, lmode
, lmode
, opname
, suffix
);
5421 init_one_libfunc (name
)
5426 /* Create a FUNCTION_DECL that can be passed to
5427 targetm.encode_section_info. */
5428 /* ??? We don't have any type information except for this is
5429 a function. Pretend this is "int foo()". */
5430 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
5431 build_function_type (integer_type_node
, NULL_TREE
));
5432 DECL_ARTIFICIAL (decl
) = 1;
5433 DECL_EXTERNAL (decl
) = 1;
5434 TREE_PUBLIC (decl
) = 1;
5436 symbol
= XEXP (DECL_RTL (decl
), 0);
5438 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5439 are the flags assigned by targetm.encode_section_info. */
5440 SYMBOL_REF_DECL (symbol
) = 0;
5445 /* Call this once to initialize the contents of the optabs
5446 appropriately for the current target machine. */
5451 unsigned int i
, j
, k
;
5453 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5455 for (i
= 0; i
< ARRAY_SIZE (fixtab
); i
++)
5456 for (j
= 0; j
< ARRAY_SIZE (fixtab
[0]); j
++)
5457 for (k
= 0; k
< ARRAY_SIZE (fixtab
[0][0]); k
++)
5458 fixtab
[i
][j
][k
] = CODE_FOR_nothing
;
5460 for (i
= 0; i
< ARRAY_SIZE (fixtrunctab
); i
++)
5461 for (j
= 0; j
< ARRAY_SIZE (fixtrunctab
[0]); j
++)
5462 for (k
= 0; k
< ARRAY_SIZE (fixtrunctab
[0][0]); k
++)
5463 fixtrunctab
[i
][j
][k
] = CODE_FOR_nothing
;
5465 for (i
= 0; i
< ARRAY_SIZE (floattab
); i
++)
5466 for (j
= 0; j
< ARRAY_SIZE (floattab
[0]); j
++)
5467 for (k
= 0; k
< ARRAY_SIZE (floattab
[0][0]); k
++)
5468 floattab
[i
][j
][k
] = CODE_FOR_nothing
;
5470 for (i
= 0; i
< ARRAY_SIZE (extendtab
); i
++)
5471 for (j
= 0; j
< ARRAY_SIZE (extendtab
[0]); j
++)
5472 for (k
= 0; k
< ARRAY_SIZE (extendtab
[0][0]); k
++)
5473 extendtab
[i
][j
][k
] = CODE_FOR_nothing
;
5475 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
5476 setcc_gen_code
[i
] = CODE_FOR_nothing
;
5478 #ifdef HAVE_conditional_move
5479 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5480 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5483 add_optab
= init_optab (PLUS
);
5484 addv_optab
= init_optabv (PLUS
);
5485 sub_optab
= init_optab (MINUS
);
5486 subv_optab
= init_optabv (MINUS
);
5487 smul_optab
= init_optab (MULT
);
5488 smulv_optab
= init_optabv (MULT
);
5489 smul_highpart_optab
= init_optab (UNKNOWN
);
5490 umul_highpart_optab
= init_optab (UNKNOWN
);
5491 smul_widen_optab
= init_optab (UNKNOWN
);
5492 umul_widen_optab
= init_optab (UNKNOWN
);
5493 sdiv_optab
= init_optab (DIV
);
5494 sdivv_optab
= init_optabv (DIV
);
5495 sdivmod_optab
= init_optab (UNKNOWN
);
5496 udiv_optab
= init_optab (UDIV
);
5497 udivmod_optab
= init_optab (UNKNOWN
);
5498 smod_optab
= init_optab (MOD
);
5499 umod_optab
= init_optab (UMOD
);
5500 ftrunc_optab
= init_optab (UNKNOWN
);
5501 and_optab
= init_optab (AND
);
5502 ior_optab
= init_optab (IOR
);
5503 xor_optab
= init_optab (XOR
);
5504 ashl_optab
= init_optab (ASHIFT
);
5505 ashr_optab
= init_optab (ASHIFTRT
);
5506 lshr_optab
= init_optab (LSHIFTRT
);
5507 rotl_optab
= init_optab (ROTATE
);
5508 rotr_optab
= init_optab (ROTATERT
);
5509 smin_optab
= init_optab (SMIN
);
5510 smax_optab
= init_optab (SMAX
);
5511 umin_optab
= init_optab (UMIN
);
5512 umax_optab
= init_optab (UMAX
);
5513 pow_optab
= init_optab (UNKNOWN
);
5514 atan2_optab
= init_optab (UNKNOWN
);
5516 /* These three have codes assigned exclusively for the sake of
5518 mov_optab
= init_optab (SET
);
5519 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5520 cmp_optab
= init_optab (COMPARE
);
5522 ucmp_optab
= init_optab (UNKNOWN
);
5523 tst_optab
= init_optab (UNKNOWN
);
5524 neg_optab
= init_optab (NEG
);
5525 negv_optab
= init_optabv (NEG
);
5526 abs_optab
= init_optab (ABS
);
5527 absv_optab
= init_optabv (ABS
);
5528 addcc_optab
= init_optab (UNKNOWN
);
5529 one_cmpl_optab
= init_optab (NOT
);
5530 ffs_optab
= init_optab (FFS
);
5531 clz_optab
= init_optab (CLZ
);
5532 ctz_optab
= init_optab (CTZ
);
5533 popcount_optab
= init_optab (POPCOUNT
);
5534 parity_optab
= init_optab (PARITY
);
5535 sqrt_optab
= init_optab (SQRT
);
5536 floor_optab
= init_optab (UNKNOWN
);
5537 ceil_optab
= init_optab (UNKNOWN
);
5538 round_optab
= init_optab (UNKNOWN
);
5539 trunc_optab
= init_optab (UNKNOWN
);
5540 nearbyint_optab
= init_optab (UNKNOWN
);
5541 sin_optab
= init_optab (UNKNOWN
);
5542 cos_optab
= init_optab (UNKNOWN
);
5543 exp_optab
= init_optab (UNKNOWN
);
5544 log_optab
= init_optab (UNKNOWN
);
5545 strlen_optab
= init_optab (UNKNOWN
);
5546 cbranch_optab
= init_optab (UNKNOWN
);
5547 cmov_optab
= init_optab (UNKNOWN
);
5548 cstore_optab
= init_optab (UNKNOWN
);
5549 push_optab
= init_optab (UNKNOWN
);
5551 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5553 movstr_optab
[i
] = CODE_FOR_nothing
;
5554 clrstr_optab
[i
] = CODE_FOR_nothing
;
5556 #ifdef HAVE_SECONDARY_RELOADS
5557 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5561 /* Fill in the optabs with the insns we support. */
5564 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
5565 /* This flag says the same insns that convert to a signed fixnum
5566 also convert validly to an unsigned one. */
5567 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5568 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
5569 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
5572 /* Initialize the optabs with the names of the library functions. */
5573 init_integral_libfuncs (add_optab
, "add", '3');
5574 init_floating_libfuncs (add_optab
, "add", '3');
5575 init_integral_libfuncs (addv_optab
, "addv", '3');
5576 init_floating_libfuncs (addv_optab
, "add", '3');
5577 init_integral_libfuncs (sub_optab
, "sub", '3');
5578 init_floating_libfuncs (sub_optab
, "sub", '3');
5579 init_integral_libfuncs (subv_optab
, "subv", '3');
5580 init_floating_libfuncs (subv_optab
, "sub", '3');
5581 init_integral_libfuncs (smul_optab
, "mul", '3');
5582 init_floating_libfuncs (smul_optab
, "mul", '3');
5583 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5584 init_floating_libfuncs (smulv_optab
, "mul", '3');
5585 init_integral_libfuncs (sdiv_optab
, "div", '3');
5586 init_floating_libfuncs (sdiv_optab
, "div", '3');
5587 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5588 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5589 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5590 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5591 init_integral_libfuncs (smod_optab
, "mod", '3');
5592 init_integral_libfuncs (umod_optab
, "umod", '3');
5593 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5594 init_integral_libfuncs (and_optab
, "and", '3');
5595 init_integral_libfuncs (ior_optab
, "ior", '3');
5596 init_integral_libfuncs (xor_optab
, "xor", '3');
5597 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5598 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5599 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5600 init_integral_libfuncs (smin_optab
, "min", '3');
5601 init_floating_libfuncs (smin_optab
, "min", '3');
5602 init_integral_libfuncs (smax_optab
, "max", '3');
5603 init_floating_libfuncs (smax_optab
, "max", '3');
5604 init_integral_libfuncs (umin_optab
, "umin", '3');
5605 init_integral_libfuncs (umax_optab
, "umax", '3');
5606 init_integral_libfuncs (neg_optab
, "neg", '2');
5607 init_floating_libfuncs (neg_optab
, "neg", '2');
5608 init_integral_libfuncs (negv_optab
, "negv", '2');
5609 init_floating_libfuncs (negv_optab
, "neg", '2');
5610 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5611 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5612 init_integral_libfuncs (clz_optab
, "clz", '2');
5613 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5614 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5615 init_integral_libfuncs (parity_optab
, "parity", '2');
5617 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5618 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5619 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5620 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5622 #ifdef MULSI3_LIBCALL
5623 smul_optab
->handlers
[(int) SImode
].libfunc
5624 = init_one_libfunc (MULSI3_LIBCALL
);
5626 #ifdef MULDI3_LIBCALL
5627 smul_optab
->handlers
[(int) DImode
].libfunc
5628 = init_one_libfunc (MULDI3_LIBCALL
);
5631 #ifdef DIVSI3_LIBCALL
5632 sdiv_optab
->handlers
[(int) SImode
].libfunc
5633 = init_one_libfunc (DIVSI3_LIBCALL
);
5635 #ifdef DIVDI3_LIBCALL
5636 sdiv_optab
->handlers
[(int) DImode
].libfunc
5637 = init_one_libfunc (DIVDI3_LIBCALL
);
5640 #ifdef UDIVSI3_LIBCALL
5641 udiv_optab
->handlers
[(int) SImode
].libfunc
5642 = init_one_libfunc (UDIVSI3_LIBCALL
);
5644 #ifdef UDIVDI3_LIBCALL
5645 udiv_optab
->handlers
[(int) DImode
].libfunc
5646 = init_one_libfunc (UDIVDI3_LIBCALL
);
5649 #ifdef MODSI3_LIBCALL
5650 smod_optab
->handlers
[(int) SImode
].libfunc
5651 = init_one_libfunc (MODSI3_LIBCALL
);
5653 #ifdef MODDI3_LIBCALL
5654 smod_optab
->handlers
[(int) DImode
].libfunc
5655 = init_one_libfunc (MODDI3_LIBCALL
);
5658 #ifdef UMODSI3_LIBCALL
5659 umod_optab
->handlers
[(int) SImode
].libfunc
5660 = init_one_libfunc (UMODSI3_LIBCALL
);
5662 #ifdef UMODDI3_LIBCALL
5663 umod_optab
->handlers
[(int) DImode
].libfunc
5664 = init_one_libfunc (UMODDI3_LIBCALL
);
5667 /* Use cabs for DC complex abs, since systems generally have cabs.
5668 Don't define any libcall for SCmode, so that cabs will be used. */
5669 abs_optab
->handlers
[(int) DCmode
].libfunc
5670 = init_one_libfunc ("cabs");
5672 /* The ffs function operates on `int'. */
5673 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5674 = init_one_libfunc ("ffs");
5676 extendsfdf2_libfunc
= init_one_libfunc ("__extendsfdf2");
5677 extendsfxf2_libfunc
= init_one_libfunc ("__extendsfxf2");
5678 extendsftf2_libfunc
= init_one_libfunc ("__extendsftf2");
5679 extenddfxf2_libfunc
= init_one_libfunc ("__extenddfxf2");
5680 extenddftf2_libfunc
= init_one_libfunc ("__extenddftf2");
5682 truncdfsf2_libfunc
= init_one_libfunc ("__truncdfsf2");
5683 truncxfsf2_libfunc
= init_one_libfunc ("__truncxfsf2");
5684 trunctfsf2_libfunc
= init_one_libfunc ("__trunctfsf2");
5685 truncxfdf2_libfunc
= init_one_libfunc ("__truncxfdf2");
5686 trunctfdf2_libfunc
= init_one_libfunc ("__trunctfdf2");
5688 abort_libfunc
= init_one_libfunc ("abort");
5689 memcpy_libfunc
= init_one_libfunc ("memcpy");
5690 memmove_libfunc
= init_one_libfunc ("memmove");
5691 bcopy_libfunc
= init_one_libfunc ("bcopy");
5692 memcmp_libfunc
= init_one_libfunc ("memcmp");
5693 bcmp_libfunc
= init_one_libfunc ("__gcc_bcmp");
5694 memset_libfunc
= init_one_libfunc ("memset");
5695 bzero_libfunc
= init_one_libfunc ("bzero");
5696 setbits_libfunc
= init_one_libfunc ("__setbits");
5698 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5699 ? "_Unwind_SjLj_Resume"
5700 : "_Unwind_Resume");
5701 #ifndef DONT_USE_BUILTIN_SETJMP
5702 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5703 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5705 setjmp_libfunc
= init_one_libfunc ("setjmp");
5706 longjmp_libfunc
= init_one_libfunc ("longjmp");
5708 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5709 unwind_sjlj_unregister_libfunc
5710 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5712 eqhf2_libfunc
= init_one_libfunc ("__eqhf2");
5713 nehf2_libfunc
= init_one_libfunc ("__nehf2");
5714 gthf2_libfunc
= init_one_libfunc ("__gthf2");
5715 gehf2_libfunc
= init_one_libfunc ("__gehf2");
5716 lthf2_libfunc
= init_one_libfunc ("__lthf2");
5717 lehf2_libfunc
= init_one_libfunc ("__lehf2");
5718 unordhf2_libfunc
= init_one_libfunc ("__unordhf2");
5720 eqsf2_libfunc
= init_one_libfunc ("__eqsf2");
5721 nesf2_libfunc
= init_one_libfunc ("__nesf2");
5722 gtsf2_libfunc
= init_one_libfunc ("__gtsf2");
5723 gesf2_libfunc
= init_one_libfunc ("__gesf2");
5724 ltsf2_libfunc
= init_one_libfunc ("__ltsf2");
5725 lesf2_libfunc
= init_one_libfunc ("__lesf2");
5726 unordsf2_libfunc
= init_one_libfunc ("__unordsf2");
5728 eqdf2_libfunc
= init_one_libfunc ("__eqdf2");
5729 nedf2_libfunc
= init_one_libfunc ("__nedf2");
5730 gtdf2_libfunc
= init_one_libfunc ("__gtdf2");
5731 gedf2_libfunc
= init_one_libfunc ("__gedf2");
5732 ltdf2_libfunc
= init_one_libfunc ("__ltdf2");
5733 ledf2_libfunc
= init_one_libfunc ("__ledf2");
5734 unorddf2_libfunc
= init_one_libfunc ("__unorddf2");
5736 eqxf2_libfunc
= init_one_libfunc ("__eqxf2");
5737 nexf2_libfunc
= init_one_libfunc ("__nexf2");
5738 gtxf2_libfunc
= init_one_libfunc ("__gtxf2");
5739 gexf2_libfunc
= init_one_libfunc ("__gexf2");
5740 ltxf2_libfunc
= init_one_libfunc ("__ltxf2");
5741 lexf2_libfunc
= init_one_libfunc ("__lexf2");
5742 unordxf2_libfunc
= init_one_libfunc ("__unordxf2");
5744 eqtf2_libfunc
= init_one_libfunc ("__eqtf2");
5745 netf2_libfunc
= init_one_libfunc ("__netf2");
5746 gttf2_libfunc
= init_one_libfunc ("__gttf2");
5747 getf2_libfunc
= init_one_libfunc ("__getf2");
5748 lttf2_libfunc
= init_one_libfunc ("__lttf2");
5749 letf2_libfunc
= init_one_libfunc ("__letf2");
5750 unordtf2_libfunc
= init_one_libfunc ("__unordtf2");
5752 floatsisf_libfunc
= init_one_libfunc ("__floatsisf");
5753 floatdisf_libfunc
= init_one_libfunc ("__floatdisf");
5754 floattisf_libfunc
= init_one_libfunc ("__floattisf");
5756 floatsidf_libfunc
= init_one_libfunc ("__floatsidf");
5757 floatdidf_libfunc
= init_one_libfunc ("__floatdidf");
5758 floattidf_libfunc
= init_one_libfunc ("__floattidf");
5760 floatsixf_libfunc
= init_one_libfunc ("__floatsixf");
5761 floatdixf_libfunc
= init_one_libfunc ("__floatdixf");
5762 floattixf_libfunc
= init_one_libfunc ("__floattixf");
5764 floatsitf_libfunc
= init_one_libfunc ("__floatsitf");
5765 floatditf_libfunc
= init_one_libfunc ("__floatditf");
5766 floattitf_libfunc
= init_one_libfunc ("__floattitf");
5768 fixsfsi_libfunc
= init_one_libfunc ("__fixsfsi");
5769 fixsfdi_libfunc
= init_one_libfunc ("__fixsfdi");
5770 fixsfti_libfunc
= init_one_libfunc ("__fixsfti");
5772 fixdfsi_libfunc
= init_one_libfunc ("__fixdfsi");
5773 fixdfdi_libfunc
= init_one_libfunc ("__fixdfdi");
5774 fixdfti_libfunc
= init_one_libfunc ("__fixdfti");
5776 fixxfsi_libfunc
= init_one_libfunc ("__fixxfsi");
5777 fixxfdi_libfunc
= init_one_libfunc ("__fixxfdi");
5778 fixxfti_libfunc
= init_one_libfunc ("__fixxfti");
5780 fixtfsi_libfunc
= init_one_libfunc ("__fixtfsi");
5781 fixtfdi_libfunc
= init_one_libfunc ("__fixtfdi");
5782 fixtfti_libfunc
= init_one_libfunc ("__fixtfti");
5784 fixunssfsi_libfunc
= init_one_libfunc ("__fixunssfsi");
5785 fixunssfdi_libfunc
= init_one_libfunc ("__fixunssfdi");
5786 fixunssfti_libfunc
= init_one_libfunc ("__fixunssfti");
5788 fixunsdfsi_libfunc
= init_one_libfunc ("__fixunsdfsi");
5789 fixunsdfdi_libfunc
= init_one_libfunc ("__fixunsdfdi");
5790 fixunsdfti_libfunc
= init_one_libfunc ("__fixunsdfti");
5792 fixunsxfsi_libfunc
= init_one_libfunc ("__fixunsxfsi");
5793 fixunsxfdi_libfunc
= init_one_libfunc ("__fixunsxfdi");
5794 fixunsxfti_libfunc
= init_one_libfunc ("__fixunsxfti");
5796 fixunstfsi_libfunc
= init_one_libfunc ("__fixunstfsi");
5797 fixunstfdi_libfunc
= init_one_libfunc ("__fixunstfdi");
5798 fixunstfti_libfunc
= init_one_libfunc ("__fixunstfti");
5800 /* For function entry/exit instrumentation. */
5801 profile_function_entry_libfunc
5802 = init_one_libfunc ("__cyg_profile_func_enter");
5803 profile_function_exit_libfunc
5804 = init_one_libfunc ("__cyg_profile_func_exit");
5806 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5807 gcov_init_libfunc
= init_one_libfunc ("__gcov_init");
5809 #ifdef HAVE_conditional_trap
5813 #ifdef INIT_TARGET_OPTABS
5814 /* Allow the target to add more libcalls or rename some, etc. */
5819 static GTY(()) rtx trap_rtx
;
5821 #ifdef HAVE_conditional_trap
5822 /* The insn generating function can not take an rtx_code argument.
5823 TRAP_RTX is used as an rtx argument. Its code is replaced with
5824 the code to be used in the trap insn and all other fields are
5830 if (HAVE_conditional_trap
)
5832 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5837 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5838 CODE. Return 0 on failure. */
5841 gen_cond_trap (code
, op1
, op2
, tcode
)
5842 enum rtx_code code ATTRIBUTE_UNUSED
;
5843 rtx op1
, op2 ATTRIBUTE_UNUSED
, tcode ATTRIBUTE_UNUSED
;
5845 enum machine_mode mode
= GET_MODE (op1
);
5847 if (mode
== VOIDmode
)
5850 #ifdef HAVE_conditional_trap
5851 if (HAVE_conditional_trap
5852 && cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
5856 emit_insn (GEN_FCN (cmp_optab
->handlers
[(int) mode
].insn_code
) (op1
, op2
));
5857 PUT_CODE (trap_rtx
, code
);
5858 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5862 insn
= get_insns ();
5872 #include "gt-optabs.h"