1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this extract is missing many original source lines (the
   numbers embedded at the start of each fragment are original line
   numbers; gaps in them mark dropped lines).  The fragments below are
   therefore not complete C — treat comments here as a map, not a spec.  */
47 /* Each optab contains info on how this target machine
48 can perform a particular operation
49 for all sizes and kinds of operands.
51 The operation to be performed is often specified
52 by passing one of these optabs as an argument.
54 See expr.h for documentation of these optabs. */
/* Master table of all optabs, one entry per optab index (OTI_MAX).  */
56 optab optab_table
[OTI_MAX
];
/* rtx expressions for the support library functions, one per libfunc
   index (LTI_MAX).  */
58 rtx libfunc_table
[LTI_MAX
];
60 /* Tables of patterns for extending one integer mode to another. */
/* Dimensions: [mode][mode][2] — presumably [from][to][unsignedp];
   TODO(review) confirm index order against the full source.  */
61 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
63 /* Tables of patterns for converting between fixed and floating point. */
64 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
65 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
66 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
68 /* Contains the optab used for each rtx code. */
69 optab code_to_optab
[NUM_RTX_CODE
+ 1];
71 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
72 gives the gen_function to make a branch to test that condition. */
74 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
76 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
77 gives the insn code to make a store-condition insn
78 to test that condition. */
80 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
82 #ifdef HAVE_conditional_move
83 /* Indexed by the machine mode, gives the insn code to make a conditional
84 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
85 setcc_gen_code to cut down on the number of named patterns. Consider a day
86 when a lot more rtx codes are conditional (eg: for the ARM). */
88 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* Forward declarations for the file-local helpers defined below
   (pre-ISO `PARAMS' prototypes).  NOTE(review): the can_fix_p and
   can_float_p entries are visibly truncated in this extract.  */
91 static int add_equal_note
PARAMS ((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
92 static rtx widen_operand
PARAMS ((rtx
, enum machine_mode
,
93 enum machine_mode
, int, int));
94 static int expand_cmplxdiv_straight
PARAMS ((rtx
, rtx
, rtx
, rtx
,
95 rtx
, rtx
, enum machine_mode
,
96 int, enum optab_methods
,
97 enum mode_class
, optab
));
98 static int expand_cmplxdiv_wide
PARAMS ((rtx
, rtx
, rtx
, rtx
,
99 rtx
, rtx
, enum machine_mode
,
100 int, enum optab_methods
,
101 enum mode_class
, optab
));
102 static void prepare_cmp_insn
PARAMS ((rtx
*, rtx
*, enum rtx_code
*, rtx
,
103 enum machine_mode
*, int *,
104 enum can_compare_purpose
));
105 static enum insn_code can_fix_p
PARAMS ((enum machine_mode
, enum machine_mode
,
107 static enum insn_code can_float_p
PARAMS ((enum machine_mode
,
110 static rtx ftruncify
PARAMS ((rtx
));
111 static optab new_optab
PARAMS ((void));
112 static inline optab init_optab
PARAMS ((enum rtx_code
));
113 static inline optab init_optabv
PARAMS ((enum rtx_code
));
114 static void init_libfuncs
PARAMS ((optab
, int, int, const char *, int));
115 static void init_integral_libfuncs
PARAMS ((optab
, const char *, int));
116 static void init_floating_libfuncs
PARAMS ((optab
, const char *, int));
117 #ifdef HAVE_conditional_trap
118 static void init_traps
PARAMS ((void));
120 static void emit_cmp_and_jump_insn_1
PARAMS ((rtx
, rtx
, enum machine_mode
,
121 enum rtx_code
, int, rtx
));
122 static void prepare_float_lib_cmp
PARAMS ((rtx
*, rtx
*, enum rtx_code
*,
123 enum machine_mode
*, int *));
124 static rtx expand_vector_binop
PARAMS ((enum machine_mode
, optab
,
126 enum optab_methods
));
127 static rtx expand_vector_unop
PARAMS ((enum machine_mode
, optab
, rtx
, rtx
,
130 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
131 the result of operation CODE applied to OP0 (and OP1 if it is a binary
134 If the last insn does not set TARGET, don't do anything, but return 1.
136 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
137 don't add the REG_EQUAL note but return 0. Our caller can then try
138 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this extract drops many original lines of the function
   (return type, braces, early returns); the fragments below keep their
   original line numbers.  Do not treat the fragment as compilable.  */
141 add_equal_note (insns
, target
, code
, op0
, op1
)
147 rtx last_insn
, insn
, set
;
/* Bail-out condition fragment: a single-insn sequence gets no note.  */
152 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only unary ('1'), binary ('2'/'c') and comparison ('<') codes can be
   expressed as a REG_EQUAL note.  */
155 if (GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
156 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
159 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Advance to the last insn of the emitted sequence.  */
162 for (last_insn
= insns
;
163 NEXT_INSN (last_insn
) != NULL_RTX
;
164 last_insn
= NEXT_INSN (last_insn
))
167 set
= single_set (last_insn
);
/* The last insn must set TARGET (possibly via STRICT_LOW_PART).  */
171 if (! rtx_equal_p (SET_DEST (set
), target
)
172 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
174 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
175 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan backwards from the last insn looking for an earlier set of
   TARGET.  */
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
/* Build the equivalent rtx: unary format for class '1', two-operand
   format otherwise.  */
194 if (GET_RTX_CLASS (code
) == '1')
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
/* NOTE(review): this extract is missing several original lines of the
   function (header type, braces, the final return); fragments keep
   their original line numbers.  */
211 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
213 enum machine_mode mode
, oldmode
;
219 /* If we don't have to extend and this is a constant, return it. */
220 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
223 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
224 extend since it will be more efficient to do so unless the signedness of
225 a promoted object differs from our extension. */
227 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
228 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
229 return convert_modes (mode
, oldmode
, op
, unsignedp
);
231 /* If MODE is no wider than a single word, we return a paradoxical
233 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
234 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
236 /* Otherwise, get an object of MODE, clobber it, and set the low-order
239 result
= gen_reg_rtx (mode
);
/* The CLOBBER tells the flow pass the whole register is written.  */
240 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
241 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
245 /* Generate code to perform a straightforward complex divide. */
/* Divides the complex value (real0 + i*imag0) by (real1 + i*imag1),
   storing the result parts in REALR and IMAGR, using the textbook
   formula with divisor c*c + d*d.  NOTE(review): this extract drops
   many original lines (braces, returns, some declarations); fragments
   keep their original line numbers.  */
248 expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
249 unsignedp
, methods
, class, binoptab
)
250 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
251 enum machine_mode submode
;
253 enum optab_methods methods
;
254 enum mode_class
class;
261 optab this_add_optab
= add_optab
;
262 optab this_sub_optab
= sub_optab
;
263 optab this_neg_optab
= neg_optab
;
264 optab this_mul_optab
= smul_optab
;
/* Use the overflow-trapping optab variants for trapping signed
   division.  */
266 if (binoptab
== sdivv_optab
)
268 this_add_optab
= addv_optab
;
269 this_sub_optab
= subv_optab
;
270 this_neg_optab
= negv_optab
;
271 this_mul_optab
= smulv_optab
;
274 /* Don't fetch these from memory more than once. */
275 real0
= force_reg (submode
, real0
);
276 real1
= force_reg (submode
, real1
);
279 imag0
= force_reg (submode
, imag0
);
281 imag1
= force_reg (submode
, imag1
);
283 /* Divisor: c*c + d*d. */
284 temp1
= expand_binop (submode
, this_mul_optab
, real1
, real1
,
285 NULL_RTX
, unsignedp
, methods
);
287 temp2
= expand_binop (submode
, this_mul_optab
, imag1
, imag1
,
288 NULL_RTX
, unsignedp
, methods
);
/* Either sub-expansion failing aborts the whole strategy.  */
290 if (temp1
== 0 || temp2
== 0)
293 divisor
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
294 NULL_RTX
, unsignedp
, methods
);
/* Special case: purely-real numerator (imag0 absent).  */
300 /* Mathematically, ((a)(c-id))/divisor. */
301 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
303 /* Calculate the dividend. */
304 real_t
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
305 NULL_RTX
, unsignedp
, methods
);
307 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
308 NULL_RTX
, unsignedp
, methods
);
310 if (real_t
== 0 || imag_t
== 0)
313 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
314 NULL_RTX
, unsignedp
);
/* General case: full complex numerator.  */
318 /* Mathematically, ((a+ib)(c-id))/divider. */
319 /* Calculate the dividend. */
320 temp1
= expand_binop (submode
, this_mul_optab
, real0
, real1
,
321 NULL_RTX
, unsignedp
, methods
);
323 temp2
= expand_binop (submode
, this_mul_optab
, imag0
, imag1
,
324 NULL_RTX
, unsignedp
, methods
);
326 if (temp1
== 0 || temp2
== 0)
329 real_t
= expand_binop (submode
, this_add_optab
, temp1
, temp2
,
330 NULL_RTX
, unsignedp
, methods
);
332 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, real1
,
333 NULL_RTX
, unsignedp
, methods
);
335 temp2
= expand_binop (submode
, this_mul_optab
, real0
, imag1
,
336 NULL_RTX
, unsignedp
, methods
);
338 if (temp1
== 0 || temp2
== 0)
341 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, temp2
,
342 NULL_RTX
, unsignedp
, methods
);
344 if (real_t
== 0 || imag_t
== 0)
/* Final division of each dividend part by the real divisor: a float
   binop for complex-float modes, expand_divmod otherwise.  */
348 if (class == MODE_COMPLEX_FLOAT
)
349 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
350 realr
, unsignedp
, methods
);
352 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
353 real_t
, divisor
, realr
, unsignedp
);
359 emit_move_insn (realr
, res
);
361 if (class == MODE_COMPLEX_FLOAT
)
362 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
363 imagr
, unsignedp
, methods
);
365 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
366 imag_t
, divisor
, imagr
, unsignedp
);
372 emit_move_insn (imagr
, res
);
377 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Smith's algorithm: compare |c| and |d| at run time and scale by the
   smaller/larger ratio to avoid overflow in c*c + d*d.  Emits a
   two-armed branch (lab1/lab2).  NOTE(review): this extract drops many
   original lines (braces, returns, label emission for lab1, some
   declarations); fragments keep their original line numbers.  */
380 expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
, realr
, imagr
, submode
,
381 unsignedp
, methods
, class, binoptab
)
382 rtx real0
, real1
, imag0
, imag1
, realr
, imagr
;
383 enum machine_mode submode
;
385 enum optab_methods methods
;
386 enum mode_class
class;
391 rtx temp1
, temp2
, lab1
, lab2
;
392 enum machine_mode mode
;
394 optab this_add_optab
= add_optab
;
395 optab this_sub_optab
= sub_optab
;
396 optab this_neg_optab
= neg_optab
;
397 optab this_mul_optab
= smul_optab
;
/* Use the overflow-trapping optab variants for trapping signed
   division.  */
399 if (binoptab
== sdivv_optab
)
401 this_add_optab
= addv_optab
;
402 this_sub_optab
= subv_optab
;
403 this_neg_optab
= negv_optab
;
404 this_mul_optab
= smulv_optab
;
407 /* Don't fetch these from memory more than once. */
408 real0
= force_reg (submode
, real0
);
409 real1
= force_reg (submode
, real1
);
412 imag0
= force_reg (submode
, imag0
);
414 imag1
= force_reg (submode
, imag1
);
416 /* XXX What's an "unsigned" complex number? */
/* Compare |real1| and |imag1| to pick the scaling direction.  */
424 temp1
= expand_abs (submode
, real1
, NULL_RTX
, unsignedp
, 1);
425 temp2
= expand_abs (submode
, imag1
, NULL_RTX
, unsignedp
, 1);
428 if (temp1
== 0 || temp2
== 0)
431 mode
= GET_MODE (temp1
);
432 lab1
= gen_label_rtx ();
433 emit_cmp_and_jump_insns (temp1
, temp2
, LT
, NULL_RTX
,
434 mode
, unsignedp
, lab1
);
436 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
438 if (class == MODE_COMPLEX_FLOAT
)
439 ratio
= expand_binop (submode
, binoptab
, imag1
, real1
,
440 NULL_RTX
, unsignedp
, methods
);
442 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
443 imag1
, real1
, NULL_RTX
, unsignedp
);
448 /* Calculate divisor. */
450 temp1
= expand_binop (submode
, this_mul_optab
, imag1
, ratio
,
451 NULL_RTX
, unsignedp
, methods
);
456 divisor
= expand_binop (submode
, this_add_optab
, temp1
, real1
,
457 NULL_RTX
, unsignedp
, methods
);
462 /* Calculate dividend. */
/* Special case: purely-real numerator.  */
468 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
470 imag_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
471 NULL_RTX
, unsignedp
, methods
);
476 imag_t
= expand_unop (submode
, this_neg_optab
, imag_t
,
477 NULL_RTX
, unsignedp
);
479 if (real_t
== 0 || imag_t
== 0)
/* General case: full complex numerator.  */
484 /* Compute (a+ib)/(c+id) as
485 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
487 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
488 NULL_RTX
, unsignedp
, methods
);
493 real_t
= expand_binop (submode
, this_add_optab
, temp1
, real0
,
494 NULL_RTX
, unsignedp
, methods
);
496 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
497 NULL_RTX
, unsignedp
, methods
);
502 imag_t
= expand_binop (submode
, this_sub_optab
, imag0
, temp1
,
503 NULL_RTX
, unsignedp
, methods
);
505 if (real_t
== 0 || imag_t
== 0)
/* Divide each part by the scaled divisor and store the results.  */
509 if (class == MODE_COMPLEX_FLOAT
)
510 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
511 realr
, unsignedp
, methods
);
513 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
514 real_t
, divisor
, realr
, unsignedp
);
520 emit_move_insn (realr
, res
);
522 if (class == MODE_COMPLEX_FLOAT
)
523 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
524 imagr
, unsignedp
, methods
);
526 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
527 imag_t
, divisor
, imagr
, unsignedp
);
533 emit_move_insn (imagr
, res
);
/* Skip over the second arm.  */
535 lab2
= gen_label_rtx ();
536 emit_jump_insn (gen_jump (lab2
));
541 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
543 if (class == MODE_COMPLEX_FLOAT
)
544 ratio
= expand_binop (submode
, binoptab
, real1
, imag1
,
545 NULL_RTX
, unsignedp
, methods
);
547 ratio
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
548 real1
, imag1
, NULL_RTX
, unsignedp
);
553 /* Calculate divisor. */
555 temp1
= expand_binop (submode
, this_mul_optab
, real1
, ratio
,
556 NULL_RTX
, unsignedp
, methods
);
561 divisor
= expand_binop (submode
, this_add_optab
, temp1
, imag1
,
562 NULL_RTX
, unsignedp
, methods
);
567 /* Calculate dividend. */
/* Special case: purely-real numerator.  */
571 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
573 real_t
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
574 NULL_RTX
, unsignedp
, methods
);
576 imag_t
= expand_unop (submode
, this_neg_optab
, real0
,
577 NULL_RTX
, unsignedp
);
579 if (real_t
== 0 || imag_t
== 0)
/* General case: full complex numerator.  */
584 /* Compute (a+ib)/(c+id) as
585 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
587 temp1
= expand_binop (submode
, this_mul_optab
, real0
, ratio
,
588 NULL_RTX
, unsignedp
, methods
);
593 real_t
= expand_binop (submode
, this_add_optab
, temp1
, imag0
,
594 NULL_RTX
, unsignedp
, methods
);
596 temp1
= expand_binop (submode
, this_mul_optab
, imag0
, ratio
,
597 NULL_RTX
, unsignedp
, methods
);
602 imag_t
= expand_binop (submode
, this_sub_optab
, temp1
, real0
,
603 NULL_RTX
, unsignedp
, methods
);
605 if (real_t
== 0 || imag_t
== 0)
/* Divide each part by the scaled divisor and store the results.  */
609 if (class == MODE_COMPLEX_FLOAT
)
610 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
611 realr
, unsignedp
, methods
);
613 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
614 real_t
, divisor
, realr
, unsignedp
);
620 emit_move_insn (realr
, res
);
622 if (class == MODE_COMPLEX_FLOAT
)
623 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
624 imagr
, unsignedp
, methods
);
626 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
627 imag_t
, divisor
, imagr
, unsignedp
);
633 emit_move_insn (imagr
, res
);
640 /* Wrapper around expand_binop which takes an rtx code to specify
641 the operation to perform, not an optab pointer. All other
642 arguments are the same. */
/* Maps CODE to its optab via the file-scope code_to_optab table and
   delegates to expand_binop.  NOTE(review): this extract drops several
   original lines (parameter declarations for code/op0/op1/target/
   unsignedp, braces, and presumably a null-binop check); fragments
   keep their original line numbers.  */
644 expand_simple_binop (mode
, code
, op0
, op1
, target
, unsignedp
, methods
)
645 enum machine_mode mode
;
650 enum optab_methods methods
;
652 optab binop
= code_to_optab
[(int) code
];
656 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
659 /* Generate code to perform an operation specified by BINOPTAB
660 on operands OP0 and OP1, with result having machine-mode MODE.
662 UNSIGNEDP is for the case where we have to widen the operands
663 to perform the operation. It says to use zero-extension.
665 If TARGET is nonzero, the value
666 is generated there, if it is convenient to do so.
667 In all cases an rtx is returned for the locus of the value;
668 this may or may not be TARGET. */
671 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
672 enum machine_mode mode
;
677 enum optab_methods methods
;
679 enum optab_methods next_methods
680 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
681 ? OPTAB_WIDEN
: methods
);
682 enum mode_class
class;
683 enum machine_mode wider_mode
;
685 int commutative_op
= 0;
686 int shift_op
= (binoptab
->code
== ASHIFT
687 || binoptab
->code
== ASHIFTRT
688 || binoptab
->code
== LSHIFTRT
689 || binoptab
->code
== ROTATE
690 || binoptab
->code
== ROTATERT
);
691 rtx entry_last
= get_last_insn ();
694 class = GET_MODE_CLASS (mode
);
696 op0
= protect_from_queue (op0
, 0);
697 op1
= protect_from_queue (op1
, 0);
699 target
= protect_from_queue (target
, 1);
703 op0
= force_not_mem (op0
);
704 op1
= force_not_mem (op1
);
707 /* If subtracting an integer constant, convert this into an addition of
708 the negated constant. */
710 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
712 op1
= negate_rtx (mode
, op1
);
713 binoptab
= add_optab
;
716 /* If we are inside an appropriately-short loop and one operand is an
717 expensive constant, force it into a register. */
718 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
719 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
720 op0
= force_reg (mode
, op0
);
722 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
723 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
724 op1
= force_reg (mode
, op1
);
726 /* Record where to delete back to if we backtrack. */
727 last
= get_last_insn ();
729 /* If operation is commutative,
730 try to make the first operand a register.
731 Even better, try to make it the same as the target.
732 Also try to make the last operand a constant. */
733 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
734 || binoptab
== smul_widen_optab
735 || binoptab
== umul_widen_optab
736 || binoptab
== smul_highpart_optab
737 || binoptab
== umul_highpart_optab
)
741 if (((target
== 0 || GET_CODE (target
) == REG
)
742 ? ((GET_CODE (op1
) == REG
743 && GET_CODE (op0
) != REG
)
745 : rtx_equal_p (op1
, target
))
746 || GET_CODE (op0
) == CONST_INT
)
754 /* If we can do it with a three-operand insn, do so. */
756 if (methods
!= OPTAB_MUST_WIDEN
757 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
759 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
760 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
761 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
763 rtx xop0
= op0
, xop1
= op1
;
768 temp
= gen_reg_rtx (mode
);
770 /* If it is a commutative operator and the modes would match
771 if we would swap the operands, we can save the conversions. */
774 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
775 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
779 tmp
= op0
; op0
= op1
; op1
= tmp
;
780 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
784 /* In case the insn wants input operands in modes different from
785 those of the actual operands, convert the operands. It would
786 seem that we don't need to convert CONST_INTs, but we do, so
787 that they're properly zero-extended, sign-extended or truncated
790 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
791 xop0
= convert_modes (mode0
,
792 GET_MODE (op0
) != VOIDmode
797 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
798 xop1
= convert_modes (mode1
,
799 GET_MODE (op1
) != VOIDmode
804 /* Now, if insn's predicates don't allow our operands, put them into
807 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
808 && mode0
!= VOIDmode
)
809 xop0
= copy_to_mode_reg (mode0
, xop0
);
811 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
812 && mode1
!= VOIDmode
)
813 xop1
= copy_to_mode_reg (mode1
, xop1
);
815 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
816 temp
= gen_reg_rtx (mode
);
818 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
821 /* If PAT is composed of more than one insn, try to add an appropriate
822 REG_EQUAL note to it. If we can't because TEMP conflicts with an
823 operand, call ourselves again, this time without a target. */
824 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
825 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
827 delete_insns_since (last
);
828 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
836 delete_insns_since (last
);
839 /* If this is a multiply, see if we can do a widening operation that
840 takes operands of this mode and makes a wider mode. */
842 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
843 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
844 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
845 != CODE_FOR_nothing
))
847 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
848 unsignedp
? umul_widen_optab
: smul_widen_optab
,
849 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
853 if (GET_MODE_CLASS (mode
) == MODE_INT
)
854 return gen_lowpart (mode
, temp
);
856 return convert_to_mode (mode
, temp
, unsignedp
);
860 /* Look for a wider mode of the same class for which we think we
861 can open-code the operation. Check for a widening multiply at the
862 wider mode as well. */
864 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
865 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
866 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
867 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
869 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
870 || (binoptab
== smul_optab
871 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
872 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
873 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
874 != CODE_FOR_nothing
)))
876 rtx xop0
= op0
, xop1
= op1
;
879 /* For certain integer operations, we need not actually extend
880 the narrow operands, as long as we will truncate
881 the results to the same narrowness. */
883 if ((binoptab
== ior_optab
|| binoptab
== and_optab
884 || binoptab
== xor_optab
885 || binoptab
== add_optab
|| binoptab
== sub_optab
886 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
887 && class == MODE_INT
)
890 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
892 /* The second operand of a shift must always be extended. */
893 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
894 no_extend
&& binoptab
!= ashl_optab
);
896 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
897 unsignedp
, OPTAB_DIRECT
);
900 if (class != MODE_INT
)
903 target
= gen_reg_rtx (mode
);
904 convert_move (target
, temp
, 0);
908 return gen_lowpart (mode
, temp
);
911 delete_insns_since (last
);
915 /* These can be done a word at a time. */
916 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
918 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
919 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
925 /* If TARGET is the same as one of the operands, the REG_EQUAL note
926 won't be accurate, so use a new target. */
927 if (target
== 0 || target
== op0
|| target
== op1
)
928 target
= gen_reg_rtx (mode
);
932 /* Do the actual arithmetic. */
933 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
935 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
936 rtx x
= expand_binop (word_mode
, binoptab
,
937 operand_subword_force (op0
, i
, mode
),
938 operand_subword_force (op1
, i
, mode
),
939 target_piece
, unsignedp
, next_methods
);
944 if (target_piece
!= x
)
945 emit_move_insn (target_piece
, x
);
948 insns
= get_insns ();
951 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
953 if (binoptab
->code
!= UNKNOWN
)
955 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
956 copy_rtx (op0
), copy_rtx (op1
));
960 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
965 /* Synthesize double word shifts from single word shifts. */
966 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
967 || binoptab
== ashr_optab
)
969 && GET_CODE (op1
) == CONST_INT
970 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
971 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
972 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
973 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
975 rtx insns
, inter
, equiv_value
;
976 rtx into_target
, outof_target
;
977 rtx into_input
, outof_input
;
978 int shift_count
, left_shift
, outof_word
;
980 /* If TARGET is the same as one of the operands, the REG_EQUAL note
981 won't be accurate, so use a new target. */
982 if (target
== 0 || target
== op0
|| target
== op1
)
983 target
= gen_reg_rtx (mode
);
987 shift_count
= INTVAL (op1
);
989 /* OUTOF_* is the word we are shifting bits away from, and
990 INTO_* is the word that we are shifting bits towards, thus
991 they differ depending on the direction of the shift and
994 left_shift
= binoptab
== ashl_optab
;
995 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
997 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
998 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1000 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1001 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1003 if (shift_count
>= BITS_PER_WORD
)
1005 inter
= expand_binop (word_mode
, binoptab
,
1007 GEN_INT (shift_count
- BITS_PER_WORD
),
1008 into_target
, unsignedp
, next_methods
);
1010 if (inter
!= 0 && inter
!= into_target
)
1011 emit_move_insn (into_target
, inter
);
1013 /* For a signed right shift, we must fill the word we are shifting
1014 out of with copies of the sign bit. Otherwise it is zeroed. */
1015 if (inter
!= 0 && binoptab
!= ashr_optab
)
1016 inter
= CONST0_RTX (word_mode
);
1017 else if (inter
!= 0)
1018 inter
= expand_binop (word_mode
, binoptab
,
1020 GEN_INT (BITS_PER_WORD
- 1),
1021 outof_target
, unsignedp
, next_methods
);
1023 if (inter
!= 0 && inter
!= outof_target
)
1024 emit_move_insn (outof_target
, inter
);
1029 optab reverse_unsigned_shift
, unsigned_shift
;
1031 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1032 we must do a logical shift in the opposite direction of the
1035 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
1037 /* For a shift of less than BITS_PER_WORD, to compute the word
1038 shifted towards, we need to unsigned shift the orig value of
1041 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
1043 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
1045 GEN_INT (BITS_PER_WORD
- shift_count
),
1046 0, unsignedp
, next_methods
);
1051 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
1052 op1
, 0, unsignedp
, next_methods
);
1055 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
1056 into_target
, unsignedp
, next_methods
);
1058 if (inter
!= 0 && inter
!= into_target
)
1059 emit_move_insn (into_target
, inter
);
1062 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
1063 op1
, outof_target
, unsignedp
, next_methods
);
1065 if (inter
!= 0 && inter
!= outof_target
)
1066 emit_move_insn (outof_target
, inter
);
1069 insns
= get_insns ();
1074 if (binoptab
->code
!= UNKNOWN
)
1075 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1079 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1084 /* Synthesize double word rotates from single word shifts. */
1085 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1086 && class == MODE_INT
1087 && GET_CODE (op1
) == CONST_INT
1088 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1089 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1090 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1092 rtx insns
, equiv_value
;
1093 rtx into_target
, outof_target
;
1094 rtx into_input
, outof_input
;
1096 int shift_count
, left_shift
, outof_word
;
1098 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1099 won't be accurate, so use a new target. */
1100 if (target
== 0 || target
== op0
|| target
== op1
)
1101 target
= gen_reg_rtx (mode
);
1105 shift_count
= INTVAL (op1
);
1107 /* OUTOF_* is the word we are shifting bits away from, and
1108 INTO_* is the word that we are shifting bits towards, thus
1109 they differ depending on the direction of the shift and
1110 WORDS_BIG_ENDIAN. */
1112 left_shift
= (binoptab
== rotl_optab
);
1113 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1115 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1116 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1118 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1119 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1121 if (shift_count
== BITS_PER_WORD
)
1123 /* This is just a word swap. */
1124 emit_move_insn (outof_target
, into_input
);
1125 emit_move_insn (into_target
, outof_input
);
1130 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1131 rtx first_shift_count
, second_shift_count
;
1132 optab reverse_unsigned_shift
, unsigned_shift
;
1134 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1135 ? lshr_optab
: ashl_optab
);
1137 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1138 ? ashl_optab
: lshr_optab
);
1140 if (shift_count
> BITS_PER_WORD
)
1142 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1143 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1147 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1148 second_shift_count
= GEN_INT (shift_count
);
1151 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1152 outof_input
, first_shift_count
,
1153 NULL_RTX
, unsignedp
, next_methods
);
1154 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1155 into_input
, second_shift_count
,
1156 NULL_RTX
, unsignedp
, next_methods
);
1158 if (into_temp1
!= 0 && into_temp2
!= 0)
1159 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1160 into_target
, unsignedp
, next_methods
);
1164 if (inter
!= 0 && inter
!= into_target
)
1165 emit_move_insn (into_target
, inter
);
1167 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1168 into_input
, first_shift_count
,
1169 NULL_RTX
, unsignedp
, next_methods
);
1170 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1171 outof_input
, second_shift_count
,
1172 NULL_RTX
, unsignedp
, next_methods
);
1174 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1175 inter
= expand_binop (word_mode
, ior_optab
,
1176 outof_temp1
, outof_temp2
,
1177 outof_target
, unsignedp
, next_methods
);
1179 if (inter
!= 0 && inter
!= outof_target
)
1180 emit_move_insn (outof_target
, inter
);
1183 insns
= get_insns ();
1188 if (binoptab
->code
!= UNKNOWN
)
1189 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1193 /* We can't make this a no conflict block if this is a word swap,
1194 because the word swap case fails if the input and output values
1195 are in the same register. */
1196 if (shift_count
!= BITS_PER_WORD
)
1197 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1206 /* These can be done a word at a time by propagating carries. */
1207 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1208 && class == MODE_INT
1209 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1210 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1213 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1214 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1215 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1216 rtx xop0
, xop1
, xtarget
;
1218 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1219 value is one of those, use it. Otherwise, use 1 since it is the
1220 one easiest to get. */
1221 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1222 int normalizep
= STORE_FLAG_VALUE
;
1227 /* Prepare the operands. */
1228 xop0
= force_reg (mode
, op0
);
1229 xop1
= force_reg (mode
, op1
);
1231 xtarget
= gen_reg_rtx (mode
);
1233 if (target
== 0 || GET_CODE (target
) != REG
)
1236 /* Indicate for flow that the entire target reg is being set. */
1237 if (GET_CODE (target
) == REG
)
1238 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1240 /* Do the actual arithmetic. */
1241 for (i
= 0; i
< nwords
; i
++)
1243 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1244 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1245 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1246 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1249 /* Main add/subtract of the input operands. */
1250 x
= expand_binop (word_mode
, binoptab
,
1251 op0_piece
, op1_piece
,
1252 target_piece
, unsignedp
, next_methods
);
1258 /* Store carry from main add/subtract. */
1259 carry_out
= gen_reg_rtx (word_mode
);
1260 carry_out
= emit_store_flag_force (carry_out
,
1261 (binoptab
== add_optab
1264 word_mode
, 1, normalizep
);
1271 /* Add/subtract previous carry to main result. */
1272 newx
= expand_binop (word_mode
,
1273 normalizep
== 1 ? binoptab
: otheroptab
,
1275 NULL_RTX
, 1, next_methods
);
1279 /* Get out carry from adding/subtracting carry in. */
1280 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1281 carry_tmp
= emit_store_flag_force (carry_tmp
,
1282 (binoptab
== add_optab
1285 word_mode
, 1, normalizep
);
1287 /* Logical-ior the two poss. carry together. */
1288 carry_out
= expand_binop (word_mode
, ior_optab
,
1289 carry_out
, carry_tmp
,
1290 carry_out
, 0, next_methods
);
1294 emit_move_insn (target_piece
, newx
);
1297 carry_in
= carry_out
;
1300 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1302 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1304 rtx temp
= emit_move_insn (target
, xtarget
);
1306 set_unique_reg_note (temp
,
1308 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1319 delete_insns_since (last
);
1322 /* If we want to multiply two two-word values and have normal and widening
1323 multiplies of single-word values, we can do this with three smaller
1324 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1325 because we are not operating on one word at a time.
1327 The multiplication proceeds as follows:
1328 _______________________
1329 [__op0_high_|__op0_low__]
1330 _______________________
1331 * [__op1_high_|__op1_low__]
1332 _______________________________________________
1333 _______________________
1334 (1) [__op0_low__*__op1_low__]
1335 _______________________
1336 (2a) [__op0_low__*__op1_high_]
1337 _______________________
1338 (2b) [__op0_high_*__op1_low__]
1339 _______________________
1340 (3) [__op0_high_*__op1_high_]
1343 This gives a 4-word result. Since we are only interested in the
1344 lower 2 words, partial result (3) and the upper words of (2a) and
1345 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1346 calculated using non-widening multiplication.
1348 (1), however, needs to be calculated with an unsigned widening
1349 multiplication. If this operation is not directly supported we
1350 try using a signed widening multiplication and adjust the result.
1351 This adjustment works as follows:
1353 If both operands are positive then no adjustment is needed.
1355 If the operands have different signs, for example op0_low < 0 and
1356 op1_low >= 0, the instruction treats the most significant bit of
1357 op0_low as a sign bit instead of a bit with significance
1358 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1359 with 2**BITS_PER_WORD - op0_low, and two's complements the
1360 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1363 Similarly, if both operands are negative, we need to add
1364 (op0_low + op1_low) * 2**BITS_PER_WORD.
1366 We use a trick to adjust quickly. We logically shift op0_low right
1367 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1368 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1369 logical shift exists, we do an arithmetic right shift and subtract
1372 if (binoptab
== smul_optab
1373 && class == MODE_INT
1374 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1375 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1376 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1377 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1378 != CODE_FOR_nothing
)
1379 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1380 != CODE_FOR_nothing
)))
1382 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1383 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1384 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1385 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1386 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1387 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1389 rtx op0_xhigh
= NULL_RTX
;
1390 rtx op1_xhigh
= NULL_RTX
;
1392 /* If the target is the same as one of the inputs, don't use it. This
1393 prevents problems with the REG_EQUAL note. */
1394 if (target
== op0
|| target
== op1
1395 || (target
!= 0 && GET_CODE (target
) != REG
))
1398 /* Multiply the two lower words to get a double-word product.
1399 If unsigned widening multiplication is available, use that;
1400 otherwise use the signed form and compensate. */
1402 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1404 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1405 target
, 1, OPTAB_DIRECT
);
1407 /* If we didn't succeed, delete everything we did so far. */
1409 delete_insns_since (last
);
1411 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1415 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1416 != CODE_FOR_nothing
)
1418 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1419 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1420 target
, 1, OPTAB_DIRECT
);
1421 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1422 NULL_RTX
, 1, next_methods
);
1424 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1425 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1428 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1429 NULL_RTX
, 0, next_methods
);
1431 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1432 op0_xhigh
, op0_xhigh
, 0,
1436 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1437 NULL_RTX
, 1, next_methods
);
1439 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1440 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1443 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1444 NULL_RTX
, 0, next_methods
);
1446 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1447 op1_xhigh
, op1_xhigh
, 0,
1452 /* If we have been able to directly compute the product of the
1453 low-order words of the operands and perform any required adjustments
1454 of the operands, we proceed by trying two more multiplications
1455 and then computing the appropriate sum.
1457 We have checked above that the required addition is provided.
1458 Full-word addition will normally always succeed, especially if
1459 it is provided at all, so we don't worry about its failure. The
1460 multiplication may well fail, however, so we do handle that. */
1462 if (product
&& op0_xhigh
&& op1_xhigh
)
1464 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1465 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1466 NULL_RTX
, 0, OPTAB_DIRECT
);
1468 if (!REG_P (product_high
))
1469 product_high
= force_reg (word_mode
, product_high
);
1472 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1473 product_high
, 0, next_methods
);
1475 if (temp
!= 0 && temp
!= product_high
)
1476 emit_move_insn (product_high
, temp
);
1479 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1480 NULL_RTX
, 0, OPTAB_DIRECT
);
1483 temp
= expand_binop (word_mode
, add_optab
, temp
,
1484 product_high
, product_high
,
1487 if (temp
!= 0 && temp
!= product_high
)
1488 emit_move_insn (product_high
, temp
);
1490 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1494 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1496 temp
= emit_move_insn (product
, product
);
1497 set_unique_reg_note (temp
,
1499 gen_rtx_fmt_ee (MULT
, mode
,
1508 /* If we get here, we couldn't do it for some reason even though we
1509 originally thought we could. Delete anything we've emitted in
1512 delete_insns_since (last
);
1515 /* Open-code the vector operations if we have no hardware support
1517 if (class == MODE_VECTOR_INT
|| class == MODE_VECTOR_FLOAT
)
1518 return expand_vector_binop (mode
, binoptab
, op0
, op1
, target
,
1519 unsignedp
, methods
);
1521 /* We need to open-code the complex type operations: '+, -, * and /' */
1523 /* At this point we allow operations between two similar complex
1524 numbers, and also if one of the operands is not a complex number
1525 but rather of MODE_FLOAT or MODE_INT. However, the caller
1526 must make sure that the MODE of the non-complex operand matches
1527 the SUBMODE of the complex operand. */
1529 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1531 rtx real0
= 0, imag0
= 0;
1532 rtx real1
= 0, imag1
= 0;
1533 rtx realr
, imagr
, res
;
1538 /* Find the correct mode for the real and imaginary parts */
1539 enum machine_mode submode
= GET_MODE_INNER(mode
);
1541 if (submode
== BLKmode
)
1545 target
= gen_reg_rtx (mode
);
1549 realr
= gen_realpart (submode
, target
);
1550 imagr
= gen_imagpart (submode
, target
);
1552 if (GET_MODE (op0
) == mode
)
1554 real0
= gen_realpart (submode
, op0
);
1555 imag0
= gen_imagpart (submode
, op0
);
1560 if (GET_MODE (op1
) == mode
)
1562 real1
= gen_realpart (submode
, op1
);
1563 imag1
= gen_imagpart (submode
, op1
);
1568 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0 || imag1
!= 0))
1571 switch (binoptab
->code
)
1574 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1576 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1577 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1578 realr
, unsignedp
, methods
);
1582 else if (res
!= realr
)
1583 emit_move_insn (realr
, res
);
1585 if (imag0
!= 0 && imag1
!= 0)
1586 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1587 imagr
, unsignedp
, methods
);
1588 else if (imag0
!= 0)
1590 else if (binoptab
->code
== MINUS
)
1591 res
= expand_unop (submode
,
1592 binoptab
== subv_optab
? negv_optab
: neg_optab
,
1593 imag1
, imagr
, unsignedp
);
1599 else if (res
!= imagr
)
1600 emit_move_insn (imagr
, res
);
1606 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1608 if (imag0
!= 0 && imag1
!= 0)
1612 /* Don't fetch these from memory more than once. */
1613 real0
= force_reg (submode
, real0
);
1614 real1
= force_reg (submode
, real1
);
1615 imag0
= force_reg (submode
, imag0
);
1616 imag1
= force_reg (submode
, imag1
);
1618 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1619 unsignedp
, methods
);
1621 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1622 unsignedp
, methods
);
1624 if (temp1
== 0 || temp2
== 0)
1629 binoptab
== smulv_optab
? subv_optab
: sub_optab
,
1630 temp1
, temp2
, realr
, unsignedp
, methods
));
1634 else if (res
!= realr
)
1635 emit_move_insn (realr
, res
);
1637 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1638 NULL_RTX
, unsignedp
, methods
);
1640 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1641 NULL_RTX
, unsignedp
, methods
);
1643 if (temp1
== 0 || temp2
== 0)
1648 binoptab
== smulv_optab
? addv_optab
: add_optab
,
1649 temp1
, temp2
, imagr
, unsignedp
, methods
));
1653 else if (res
!= imagr
)
1654 emit_move_insn (imagr
, res
);
1660 /* Don't fetch these from memory more than once. */
1661 real0
= force_reg (submode
, real0
);
1662 real1
= force_reg (submode
, real1
);
1664 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1665 realr
, unsignedp
, methods
);
1668 else if (res
!= realr
)
1669 emit_move_insn (realr
, res
);
1672 res
= expand_binop (submode
, binoptab
,
1673 real1
, imag0
, imagr
, unsignedp
, methods
);
1675 res
= expand_binop (submode
, binoptab
,
1676 real0
, imag1
, imagr
, unsignedp
, methods
);
1680 else if (res
!= imagr
)
1681 emit_move_insn (imagr
, res
);
1688 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1692 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1694 /* Don't fetch these from memory more than once. */
1695 real1
= force_reg (submode
, real1
);
1697 /* Simply divide the real and imaginary parts by `c' */
1698 if (class == MODE_COMPLEX_FLOAT
)
1699 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1700 realr
, unsignedp
, methods
);
1702 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1703 real0
, real1
, realr
, unsignedp
);
1707 else if (res
!= realr
)
1708 emit_move_insn (realr
, res
);
1710 if (class == MODE_COMPLEX_FLOAT
)
1711 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1712 imagr
, unsignedp
, methods
);
1714 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1715 imag0
, real1
, imagr
, unsignedp
);
1719 else if (res
!= imagr
)
1720 emit_move_insn (imagr
, res
);
1726 switch (flag_complex_divide_method
)
1729 ok
= expand_cmplxdiv_straight (real0
, real1
, imag0
, imag1
,
1730 realr
, imagr
, submode
,
1736 ok
= expand_cmplxdiv_wide (real0
, real1
, imag0
, imag1
,
1737 realr
, imagr
, submode
,
1757 if (binoptab
->code
!= UNKNOWN
)
1759 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1760 copy_rtx (op0
), copy_rtx (op1
));
1764 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1770 /* It can't be open-coded in this mode.
1771 Use a library call if one is available and caller says that's ok. */
1773 if (binoptab
->handlers
[(int) mode
].libfunc
1774 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1778 enum machine_mode op1_mode
= mode
;
1785 op1_mode
= word_mode
;
1786 /* Specify unsigned here,
1787 since negative shift counts are meaningless. */
1788 op1x
= convert_to_mode (word_mode
, op1
, 1);
1791 if (GET_MODE (op0
) != VOIDmode
1792 && GET_MODE (op0
) != mode
)
1793 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1795 /* Pass 1 for NO_QUEUE so we don't lose any increments
1796 if the libcall is cse'd or moved. */
1797 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1798 NULL_RTX
, LCT_CONST
, mode
, 2,
1799 op0
, mode
, op1x
, op1_mode
);
1801 insns
= get_insns ();
1804 target
= gen_reg_rtx (mode
);
1805 emit_libcall_block (insns
, target
, value
,
1806 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1811 delete_insns_since (last
);
1813 /* It can't be done in this mode. Can we do it in a wider mode? */
1815 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1816 || methods
== OPTAB_MUST_WIDEN
))
1818 /* Caller says, don't even try. */
1819 delete_insns_since (entry_last
);
1823 /* Compute the value of METHODS to pass to recursive calls.
1824 Don't allow widening to be tried recursively. */
1826 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1828 /* Look for a wider mode of the same class for which it appears we can do
1831 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1833 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1834 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1836 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1837 != CODE_FOR_nothing
)
1838 || (methods
== OPTAB_LIB
1839 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1841 rtx xop0
= op0
, xop1
= op1
;
1844 /* For certain integer operations, we need not actually extend
1845 the narrow operands, as long as we will truncate
1846 the results to the same narrowness. */
1848 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1849 || binoptab
== xor_optab
1850 || binoptab
== add_optab
|| binoptab
== sub_optab
1851 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1852 && class == MODE_INT
)
1855 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1856 unsignedp
, no_extend
);
1858 /* The second operand of a shift must always be extended. */
1859 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1860 no_extend
&& binoptab
!= ashl_optab
);
1862 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1863 unsignedp
, methods
);
1866 if (class != MODE_INT
)
1869 target
= gen_reg_rtx (mode
);
1870 convert_move (target
, temp
, 0);
1874 return gen_lowpart (mode
, temp
);
1877 delete_insns_since (last
);
1882 delete_insns_since (entry_last
);
1886 /* Like expand_binop, but for open-coding vectors binops. */
1889 expand_vector_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
1890 enum machine_mode mode
;
1895 enum optab_methods methods
;
1897 enum machine_mode submode
, tmode
;
1898 int size
, elts
, subsize
, subbitsize
, i
;
1899 rtx t
, a
, b
, res
, seq
;
1900 enum mode_class
class;
1902 class = GET_MODE_CLASS (mode
);
1904 size
= GET_MODE_SIZE (mode
);
1905 submode
= GET_MODE_INNER (mode
);
1907 /* Search for the widest vector mode with the same inner mode that is
1908 still narrower than MODE and that allows to open-code this operator.
1909 Note, if we find such a mode and the handler later decides it can't
1910 do the expansion, we'll be called recursively with the narrower mode. */
1911 for (tmode
= GET_CLASS_NARROWEST_MODE (class);
1912 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
1913 tmode
= GET_MODE_WIDER_MODE (tmode
))
1915 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
1916 && binoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
1920 switch (binoptab
->code
)
1925 tmode
= int_mode_for_mode (mode
);
1926 if (tmode
!= BLKmode
)
1932 subsize
= GET_MODE_SIZE (submode
);
1933 subbitsize
= GET_MODE_BITSIZE (submode
);
1934 elts
= size
/ subsize
;
1936 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1937 but that we operate on more than one element at a time. */
1938 if (subsize
== GET_MODE_UNIT_SIZE (mode
) && methods
== OPTAB_DIRECT
)
1943 /* Errors can leave us with a const0_rtx as operand. */
1944 if (GET_MODE (op0
) != mode
)
1945 op0
= copy_to_mode_reg (mode
, op0
);
1946 if (GET_MODE (op1
) != mode
)
1947 op1
= copy_to_mode_reg (mode
, op1
);
1950 target
= gen_reg_rtx (mode
);
1952 for (i
= 0; i
< elts
; ++i
)
1954 /* If this is part of a register, and not the first item in the
1955 word, we can't store using a SUBREG - that would clobber
1957 And storing with a SUBREG is only possible for the least
1958 significant part, hence we can't do it for big endian
1959 (unless we want to permute the evaluation order. */
1960 if (GET_CODE (target
) == REG
1961 && (BYTES_BIG_ENDIAN
1962 ? subsize
< UNITS_PER_WORD
1963 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
1966 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
1967 if (CONSTANT_P (op0
))
1968 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
1970 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
1971 NULL_RTX
, submode
, submode
, size
);
1972 if (CONSTANT_P (op1
))
1973 b
= simplify_gen_subreg (submode
, op1
, mode
, i
* subsize
);
1975 b
= extract_bit_field (op1
, subbitsize
, i
* subbitsize
, unsignedp
,
1976 NULL_RTX
, submode
, submode
, size
);
1978 if (binoptab
->code
== DIV
)
1980 if (class == MODE_VECTOR_FLOAT
)
1981 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1982 unsignedp
, methods
);
1984 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1985 a
, b
, t
, unsignedp
);
1988 res
= expand_binop (submode
, binoptab
, a
, b
, t
,
1989 unsignedp
, methods
);
1995 emit_move_insn (t
, res
);
1997 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2013 /* Like expand_unop but for open-coding vector unops. */
2016 expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2017 enum machine_mode mode
;
2023 enum machine_mode submode
, tmode
;
2024 int size
, elts
, subsize
, subbitsize
, i
;
2027 size
= GET_MODE_SIZE (mode
);
2028 submode
= GET_MODE_INNER (mode
);
2030 /* Search for the widest vector mode with the same inner mode that is
2031 still narrower than MODE and that allows to open-code this operator.
2032 Note, if we find such a mode and the handler later decides it can't
2033 do the expansion, we'll be called recursively with the narrower mode. */
2034 for (tmode
= GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode
));
2035 GET_MODE_SIZE (tmode
) < GET_MODE_SIZE (mode
);
2036 tmode
= GET_MODE_WIDER_MODE (tmode
))
2038 if (GET_MODE_INNER (tmode
) == GET_MODE_INNER (mode
)
2039 && unoptab
->handlers
[(int) tmode
].insn_code
!= CODE_FOR_nothing
)
2042 /* If there is no negate operation, try doing a subtract from zero. */
2043 if (unoptab
== neg_optab
&& GET_MODE_CLASS (submode
) == MODE_INT
2044 /* Avoid infinite recursion when an
2045 error has left us with the wrong mode. */
2046 && GET_MODE (op0
) == mode
)
2049 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2050 target
, unsignedp
, OPTAB_DIRECT
);
2055 if (unoptab
== one_cmpl_optab
)
2057 tmode
= int_mode_for_mode (mode
);
2058 if (tmode
!= BLKmode
)
2062 subsize
= GET_MODE_SIZE (submode
);
2063 subbitsize
= GET_MODE_BITSIZE (submode
);
2064 elts
= size
/ subsize
;
2066 /* Errors can leave us with a const0_rtx as operand. */
2067 if (GET_MODE (op0
) != mode
)
2068 op0
= copy_to_mode_reg (mode
, op0
);
2071 target
= gen_reg_rtx (mode
);
2075 for (i
= 0; i
< elts
; ++i
)
2077 /* If this is part of a register, and not the first item in the
2078 word, we can't store using a SUBREG - that would clobber
2080 And storing with a SUBREG is only possible for the least
2081 significant part, hence we can't do it for big endian
2082 (unless we want to permute the evaluation order. */
2083 if (GET_CODE (target
) == REG
2084 && (BYTES_BIG_ENDIAN
2085 ? subsize
< UNITS_PER_WORD
2086 : ((i
* subsize
) % UNITS_PER_WORD
) != 0))
2089 t
= simplify_gen_subreg (submode
, target
, mode
, i
* subsize
);
2090 if (CONSTANT_P (op0
))
2091 a
= simplify_gen_subreg (submode
, op0
, mode
, i
* subsize
);
2093 a
= extract_bit_field (op0
, subbitsize
, i
* subbitsize
, unsignedp
,
2094 t
, submode
, submode
, size
);
2096 res
= expand_unop (submode
, unoptab
, a
, t
, unsignedp
);
2099 emit_move_insn (t
, res
);
2101 store_bit_field (target
, subbitsize
, i
* subbitsize
, submode
, res
,
2112 /* Expand a binary operator which has both signed and unsigned forms.
2113 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2116 If we widen unsigned operands, we may use a signed wider operation instead
2117 of an unsigned wider operation, since the result would be the same. */
2120 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
2121 enum machine_mode mode
;
2122 optab uoptab
, soptab
;
2123 rtx op0
, op1
, target
;
2125 enum optab_methods methods
;
2128 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2129 struct optab wide_soptab
;
2131 /* Do it without widening, if possible. */
2132 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2133 unsignedp
, OPTAB_DIRECT
);
2134 if (temp
|| methods
== OPTAB_DIRECT
)
2137 /* Try widening to a signed int. Make a fake signed optab that
2138 hides any signed insn for direct use. */
2139 wide_soptab
= *soptab
;
2140 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2141 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2143 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2144 unsignedp
, OPTAB_WIDEN
);
2146 /* For unsigned operands, try widening to an unsigned int. */
2147 if (temp
== 0 && unsignedp
)
2148 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2149 unsignedp
, OPTAB_WIDEN
);
2150 if (temp
|| methods
== OPTAB_WIDEN
)
2153 /* Use the right width lib call if that exists. */
2154 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2155 if (temp
|| methods
== OPTAB_LIB
)
2158 /* Must widen and use a lib call, use either signed or unsigned. */
2159 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2160 unsignedp
, methods
);
2164 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2165 unsignedp
, methods
);
2169 /* Generate code to perform an operation specified by BINOPTAB
2170 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2171 We assume that the order of the operands for the instruction
2172 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2173 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2175 Either TARG0 or TARG1 may be zero, but what that means is that
2176 the result is not actually wanted. We will generate it into
2177 a dummy pseudo-reg and discard it. They may not both be zero.
2179 Returns 1 if this operation can be performed; 0 if not. */
2182 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
2188 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2189 enum mode_class
class;
2190 enum machine_mode wider_mode
;
2191 rtx entry_last
= get_last_insn ();
2194 class = GET_MODE_CLASS (mode
);
2196 op0
= protect_from_queue (op0
, 0);
2197 op1
= protect_from_queue (op1
, 0);
2201 op0
= force_not_mem (op0
);
2202 op1
= force_not_mem (op1
);
2205 /* If we are inside an appropriately-short loop and one operand is an
2206 expensive constant, force it into a register. */
2207 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2208 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2209 op0
= force_reg (mode
, op0
);
2211 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2212 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2213 op1
= force_reg (mode
, op1
);
2216 targ0
= protect_from_queue (targ0
, 1);
2218 targ0
= gen_reg_rtx (mode
);
2220 targ1
= protect_from_queue (targ1
, 1);
2222 targ1
= gen_reg_rtx (mode
);
2224 /* Record where to go back to if we fail. */
2225 last
= get_last_insn ();
2227 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2229 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2230 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2231 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2233 rtx xop0
= op0
, xop1
= op1
;
2235 /* In case the insn wants input operands in modes different from
2236 those of the actual operands, convert the operands. It would
2237 seem that we don't need to convert CONST_INTs, but we do, so
2238 that they're properly zero-extended, sign-extended or truncated
2241 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2242 xop0
= convert_modes (mode0
,
2243 GET_MODE (op0
) != VOIDmode
2248 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2249 xop1
= convert_modes (mode1
,
2250 GET_MODE (op1
) != VOIDmode
2255 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2256 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2257 xop0
= copy_to_mode_reg (mode0
, xop0
);
2259 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2260 xop1
= copy_to_mode_reg (mode1
, xop1
);
2262 /* We could handle this, but we should always be called with a pseudo
2263 for our targets and all insns should take them as outputs. */
2264 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2265 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
2268 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2275 delete_insns_since (last
);
2278 /* It can't be done in this mode. Can we do it in a wider mode? */
2280 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2282 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2283 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2285 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2286 != CODE_FOR_nothing
)
2288 rtx t0
= gen_reg_rtx (wider_mode
);
2289 rtx t1
= gen_reg_rtx (wider_mode
);
2290 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2291 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2293 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2296 convert_move (targ0
, t0
, unsignedp
);
2297 convert_move (targ1
, t1
, unsignedp
);
2301 delete_insns_since (last
);
2306 delete_insns_since (entry_last
);
2310 /* Wrapper around expand_unop which takes an rtx code to specify
2311 the operation to perform, not an optab pointer. All other
2312 arguments are the same. */
2314 expand_simple_unop (mode
, code
, op0
, target
, unsignedp
)
2315 enum machine_mode mode
;
2321 optab unop
= code_to_optab
[(int) code
];
2325 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2328 /* Generate code to perform an operation specified by UNOPTAB
2329 on operand OP0, with result having machine-mode MODE.
2331 UNSIGNEDP is for the case where we have to widen the operands
2332 to perform the operation. It says to use zero-extension.
2334 If TARGET is nonzero, the value
2335 is generated there, if it is convenient to do so.
2336 In all cases an rtx is returned for the locus of the value;
2337 this may or may not be TARGET. */
/* NOTE(review): this chunk is extraction-garbled — original source line
   numbers are fused into the text and several original lines (braces,
   declarations of temp/xop0/pat/insns/i, returns) were elided.  Only
   comments have been added here; all original tokens are untouched.
   Strategy visible below, in order: (1) direct insn in MODE; (2) open-code
   in a wider mode and truncate; (3) one_cmpl word-at-a-time; (4) open-coded
   complex negation; (5) FP negate via sign-bit XOR; (6) libcall in MODE;
   (7) vector fallback; (8) wider-mode insn-or-libcall; (9) NEG as 0 - x.  */
2340 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2341 enum machine_mode mode
;
2347 enum mode_class
class;
2348 enum machine_mode wider_mode
;
2350 rtx last
= get_last_insn ();
2353 class = GET_MODE_CLASS (mode
);
2355 op0
= protect_from_queue (op0
, 0);
2359 op0
= force_not_mem (op0
);
2363 target
= protect_from_queue (target
, 1);
/* Attempt 1: the target has a handler for UNOPTAB directly in MODE.  */
2365 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2367 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2368 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2374 temp
= gen_reg_rtx (mode
);
2376 if (GET_MODE (xop0
) != VOIDmode
2377 && GET_MODE (xop0
) != mode0
)
2378 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2380 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2382 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2383 xop0
= copy_to_mode_reg (mode0
, xop0
);
2385 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2386 temp
= gen_reg_rtx (mode
);
2388 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* If the expander emitted more than one insn and we cannot attach a
   REG_EQUAL note, throw everything away and retry without a target.  */
2391 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2392 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2394 delete_insns_since (last
);
2395 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2403 delete_insns_since (last
);
2406 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2408 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2409 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2410 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2412 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2416 /* For certain operations, we need not actually extend
2417 the narrow operand, as long as we will truncate the
2418 results to the same narrowness. */
2420 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2421 (unoptab
== neg_optab
2422 || unoptab
== one_cmpl_optab
)
2423 && class == MODE_INT
);
2425 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2430 if (class != MODE_INT
)
2433 target
= gen_reg_rtx (mode
);
2434 convert_move (target
, temp
, 0);
2438 return gen_lowpart (mode
, temp
);
2441 delete_insns_since (last
);
2445 /* These can be done a word at a time. */
/* one's-complement of a multiword integer: NOT each word independently.  */
2446 if (unoptab
== one_cmpl_optab
2447 && class == MODE_INT
2448 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2449 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2454 if (target
== 0 || target
== op0
)
2455 target
= gen_reg_rtx (mode
);
2459 /* Do the actual arithmetic. */
2460 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2462 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2463 rtx x
= expand_unop (word_mode
, unoptab
,
2464 operand_subword_force (op0
, i
, mode
),
2465 target_piece
, unsignedp
);
2467 if (target_piece
!= x
)
2468 emit_move_insn (target_piece
, x
);
2471 insns
= get_insns ();
2474 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2475 gen_rtx_fmt_e (unoptab
->code
, mode
,
2480 /* Open-code the complex negation operation. */
2481 else if (unoptab
->code
== NEG
2482 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2488 /* Find the correct mode for the real and imaginary parts */
2489 enum machine_mode submode
= GET_MODE_INNER (mode
);
2491 if (submode
== BLKmode
)
2495 target
= gen_reg_rtx (mode
);
/* Negate imaginary and real parts separately, each recursively via
   expand_unop in the component mode.  */
2499 target_piece
= gen_imagpart (submode
, target
);
2500 x
= expand_unop (submode
, unoptab
,
2501 gen_imagpart (submode
, op0
),
2502 target_piece
, unsignedp
);
2503 if (target_piece
!= x
)
2504 emit_move_insn (target_piece
, x
);
2506 target_piece
= gen_realpart (submode
, target
);
2507 x
= expand_unop (submode
, unoptab
,
2508 gen_realpart (submode
, op0
),
2509 target_piece
, unsignedp
);
2510 if (target_piece
!= x
)
2511 emit_move_insn (target_piece
, x
);
2516 emit_no_conflict_block (seq
, target
, op0
, 0,
2517 gen_rtx_fmt_e (unoptab
->code
, mode
,
2522 /* Try negating floating point values by flipping the sign bit. */
2523 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2524 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2526 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2527 enum machine_mode imode
= int_mode_for_mode (mode
);
2528 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
/* Only valid when the format has signed zeros — otherwise flipping the
   sign bit of 0.0 would produce an incorrect -0.0.  */
2530 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2532 HOST_WIDE_INT hi
, lo
;
2533 rtx last
= get_last_insn ();
2535 /* Handle targets with different FP word orders. */
2536 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2538 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2539 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2540 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2543 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2546 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2550 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
/* XOR with a mask holding only the sign bit negates the value.  */
2553 temp
= expand_binop (imode
, xor_optab
,
2554 gen_lowpart (imode
, op0
),
2555 immed_double_const (lo
, hi
, imode
),
2556 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2558 return gen_lowpart (mode
, temp
);
2559 delete_insns_since (last
);
2563 /* Now try a library call in this mode. */
2564 if (unoptab
->handlers
[(int) mode
].libfunc
)
2571 /* Pass 1 for NO_QUEUE so we don't lose any increments
2572 if the libcall is cse'd or moved. */
2573 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2574 NULL_RTX
, LCT_CONST
, mode
, 1, op0
, mode
);
2575 insns
= get_insns ();
2578 target
= gen_reg_rtx (mode
);
2579 emit_libcall_block (insns
, target
, value
,
2580 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2585 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2586 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2588 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Unlike the earlier widening loop, this pass also accepts wider modes
   that have only a libcall (no insn pattern).  */
2590 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2592 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2593 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2595 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2596 != CODE_FOR_nothing
)
2597 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2601 /* For certain operations, we need not actually extend
2602 the narrow operand, as long as we will truncate the
2603 results to the same narrowness. */
2605 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2606 (unoptab
== neg_optab
2607 || unoptab
== one_cmpl_optab
)
2608 && class == MODE_INT
);
2610 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2615 if (class != MODE_INT
)
2618 target
= gen_reg_rtx (mode
);
2619 convert_move (target
, temp
, 0);
2623 return gen_lowpart (mode
, temp
);
2626 delete_insns_since (last
);
2631 /* If there is no negate operation, try doing a subtract from zero.
2632 The US Software GOFAST library needs this. */
2633 if (unoptab
->code
== NEG
)
2636 temp
= expand_binop (mode
,
2637 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2638 CONST0_RTX (mode
), op0
,
2639 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2647 /* Emit code to compute the absolute value of OP0, with result to
2648 TARGET if convenient. (TARGET may be 0.) The return value says
2649 where the result actually is to be found.
2651 MODE is the mode of the operand; the mode of the result is
2652 different but can be deduced from MODE.
/* NOTE(review): extraction elided some original lines (function braces,
   `safe` parameter declaration, returns); only comments were added here.
   Fallback order visible below: (1) abs/absv insn; (2) FP: AND off the
   sign bit; (3) MAX (x, -x); (4) branch-free (x ^ s) - s where
   s = x >> (W-1), when BRANCH_COST >= 2; (5) compare-and-jump + negate.  */
2657 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2658 enum machine_mode mode
;
2661 int result_unsignedp
;
2667 result_unsignedp
= 1;
2669 /* First try to do it with a special abs instruction. */
2670 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2675 /* For floating point modes, try clearing the sign bit. */
2676 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2677 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2679 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2680 enum machine_mode imode
= int_mode_for_mode (mode
);
2681 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2683 if (imode
!= BLKmode
&& bitpos
>= 0)
2685 HOST_WIDE_INT hi
, lo
;
2686 rtx last
= get_last_insn ();
2688 /* Handle targets with different FP word orders. */
2689 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2691 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2692 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2693 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2696 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2699 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2703 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
/* AND with the complement of the sign-bit mask clears the sign bit.  */
2706 temp
= expand_binop (imode
, and_optab
,
2707 gen_lowpart (imode
, op0
),
2708 immed_double_const (~lo
, ~hi
, imode
),
2709 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2711 return gen_lowpart (mode
, temp
);
2712 delete_insns_since (last
);
2716 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2717 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2719 rtx last
= get_last_insn ();
2721 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2723 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2729 delete_insns_since (last
);
2732 /* If this machine has expensive jumps, we can do integer absolute
2733 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2734 where W is the width of MODE. */
2736 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
/* `extended` is all-ones if x < 0, zero otherwise (arithmetic shift).  */
2738 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2739 size_int (GET_MODE_BITSIZE (mode
) - 1),
2742 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2745 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2746 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2752 /* If that does not win, use conditional jump and negate. */
2754 /* It is safe to use the target if it is the same
2755 as the source if this is also a pseudo register */
2756 if (op0
== target
&& GET_CODE (op0
) == REG
2757 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2760 op1
= gen_label_rtx ();
/* Target is unusable if unsafe, wrong mode, a volatile MEM, or a
   hard register — allocate a fresh pseudo in those cases.  */
2761 if (target
== 0 || ! safe
2762 || GET_MODE (target
) != mode
2763 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2764 || (GET_CODE (target
) == REG
2765 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2766 target
= gen_reg_rtx (mode
);
2768 emit_move_insn (target
, op0
);
2771 /* If this mode is an integer too wide to compare properly,
2772 compare word by word. Rely on CSE to optimize constant cases. */
2773 if (GET_MODE_CLASS (mode
) == MODE_INT
2774 && ! can_compare_p (GE
, mode
, ccp_jump
))
2775 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2778 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2779 NULL_RTX
, NULL_RTX
, op1
);
/* Fell through the >= 0 test: value is negative, so negate in place.  */
2781 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2784 emit_move_insn (target
, op0
);
2790 /* Emit code to compute the absolute value of OP0, with result to
2791 TARGET if convenient. (TARGET may be 0.) The return value says
2792 where the result actually is to be found.
2794 MODE is the mode of the operand; the mode of the result is
2795 different but can be deduced from MODE.
2797 UNSIGNEDP is relevant for complex integer modes. */
/* NOTE(review): extraction elided lines here (braces, declarations of
   temp/xop0/pat/value/insns, returns); comments only were added.
   The result mode is SUBMODE (the component mode of the complex MODE).
   Fallback order: (1) direct abs/absv insn; (2) recurse in a wider mode;
   (3) open-code sqrt(re*re + im*im) if sqrt is open-codable; (4) libcall;
   (5) wider-mode insn-or-libcall.  */
2800 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2801 enum machine_mode mode
;
2806 enum mode_class
class = GET_MODE_CLASS (mode
);
2807 enum machine_mode wider_mode
;
2809 rtx entry_last
= get_last_insn ();
2812 optab this_abs_optab
;
2814 /* Find the correct mode for the real and imaginary parts. */
2815 enum machine_mode submode
= GET_MODE_INNER (mode
);
2817 if (submode
== BLKmode
)
2820 op0
= protect_from_queue (op0
, 0);
2824 op0
= force_not_mem (op0
);
2827 last
= get_last_insn ();
2830 target
= protect_from_queue (target
, 1);
/* Use the trapping variant (absv) for signed complex-int under -ftrapv.  */
2832 this_abs_optab
= ! unsignedp
&& flag_trapv
2833 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2834 ? absv_optab
: abs_optab
;
2836 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2838 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2839 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2845 temp
= gen_reg_rtx (submode
);
2847 if (GET_MODE (xop0
) != VOIDmode
2848 && GET_MODE (xop0
) != mode0
)
2849 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2851 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2853 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2854 xop0
= copy_to_mode_reg (mode0
, xop0
);
2856 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2857 temp
= gen_reg_rtx (submode
);
2859 pat
= GEN_FCN (icode
) (temp
, xop0
);
/* Multi-insn expansion with no attachable REG_EQUAL note: discard and
   retry without a target, as in expand_unop.  */
2862 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2863 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2866 delete_insns_since (last
);
2867 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2876 delete_insns_since (last
);
2879 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2881 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2882 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2884 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2885 != CODE_FOR_nothing
)
2889 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2890 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2894 if (class != MODE_COMPLEX_INT
)
2897 target
= gen_reg_rtx (submode
);
2898 convert_move (target
, temp
, 0);
2902 return gen_lowpart (submode
, temp
);
2905 delete_insns_since (last
);
2909 /* Open-code the complex absolute-value operation
2910 if we can open-code sqrt. Otherwise it's not worth while. */
2911 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
2914 rtx real
, imag
, total
;
2916 real
= gen_realpart (submode
, op0
);
2917 imag
= gen_imagpart (submode
, op0
);
2919 /* Square both parts. */
2920 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2921 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2923 /* Sum the parts. */
2924 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2925 0, OPTAB_LIB_WIDEN
);
2927 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2928 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2930 delete_insns_since (last
);
2935 /* Now try a library call in this mode. */
2936 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
2943 /* Pass 1 for NO_QUEUE so we don't lose any increments
2944 if the libcall is cse'd or moved. */
/* NOTE(review): the libcall is fetched from abs_optab here even though
   availability was tested on this_abs_optab above — looks inconsistent
   for the absv case; confirm against upstream optabs.c before changing.  */
2945 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2946 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
2947 insns
= get_insns ();
2950 target
= gen_reg_rtx (submode
);
2951 emit_libcall_block (insns
, target
, value
,
2952 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
2957 /* It can't be done in this mode. Can we do it in a wider mode? */
2959 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2960 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2962 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
2963 != CODE_FOR_nothing
)
2964 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2968 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2970 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2974 if (class != MODE_COMPLEX_INT
)
2977 target
= gen_reg_rtx (submode
);
2978 convert_move (target
, temp
, 0);
2982 return gen_lowpart (submode
, temp
);
2985 delete_insns_since (last
);
/* All strategies failed: roll back everything emitted since entry.  */
2989 delete_insns_since (entry_last
);
2993 /* Generate an instruction whose insn-code is INSN_CODE,
2994 with two operands: an output TARGET and an input OP0.
2995 TARGET *must* be nonzero, and the output is always stored there.
2996 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2997 the value that is stored into TARGET. */
/* NOTE(review): extraction elided lines (parameter declarations for
   target/op0/code, temp/pat declarations, emit of PAT).  Comments only
   were added below; all original tokens are untouched.  */
3000 emit_unop_insn (icode
, target
, op0
, code
)
3007 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3010 temp
= target
= protect_from_queue (target
, 1);
3012 op0
= protect_from_queue (op0
, 0);
3014 /* Sign and zero extension from memory is often done specially on
3015 RISC machines, so forcing into a register here can pessimize
3017 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
3018 op0
= force_not_mem (op0
);
3020 /* Now, if insn does not accept our operands, put them into pseudos. */
3022 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
3023 op0
= copy_to_mode_reg (mode0
, op0
);
/* Compute into a scratch pseudo if the target itself is not acceptable
   to the pattern (or is a MEM under -fforce-mem); copy back at the end.  */
3025 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
3026 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
3027 temp
= gen_reg_rtx (GET_MODE (temp
));
3029 pat
= GEN_FCN (icode
) (temp
, op0
);
3031 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3032 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3037 emit_move_insn (target
, temp
);
3040 /* Emit code to perform a series of operations on a multi-word quantity, one
3043 Such a block is preceded by a CLOBBER of the output, consists of multiple
3044 insns, each setting one word of the output, and followed by a SET copying
3045 the output to itself.
3047 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3048 note indicating that it doesn't conflict with the (also multi-word)
3049 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3052 INSNS is a block of code generated to perform the operation, not including
3053 the CLOBBER and final copy. All insns that compute intermediate values
3054 are first emitted, followed by the block as described above.
3056 TARGET, OP0, and OP1 are the output and inputs of the operations,
3057 respectively. OP1 may be zero for a unary operation.
3059 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3062 If TARGET is not a register, INSNS is simply emitted with no special
3063 processing. Likewise if anything in INSNS is not an INSN or if
3064 there is a libcall block inside INSNS.
3066 The final insn emitted is returned. */
/* NOTE(review): extraction elided lines in this function (parameter
   declarations, set/note/i declarations, emit_insn calls, returns);
   only comments were added.  */
3069 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
3075 rtx prev
, next
, first
, last
, insn
;
/* Bail out to a plain emit when the special encapsulation is unusable.  */
3077 if (GET_CODE (target
) != REG
|| reload_in_progress
)
3078 return emit_insn (insns
);
3080 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3081 if (GET_CODE (insn
) != INSN
3082 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3083 return emit_insn (insns
);
3085 /* First emit all insns that do not store into words of the output and remove
3086 these from the list. */
3087 for (insn
= insns
; insn
; insn
= next
)
3092 next
= NEXT_INSN (insn
);
3094 /* Some ports (cris) create an libcall regions at their own. We must
3095 avoid any potential nesting of LIBCALLs. */
3096 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3097 remove_note (insn
, note
);
3098 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3099 remove_note (insn
, note
);
3101 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3102 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3103 set
= PATTERN (insn
);
3104 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3106 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3107 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3109 set
= XVECEXP (PATTERN (insn
), 0, i
);
/* Insns that do not touch TARGET are unlinked from the chain here
   (and, per the strategy comment above, emitted ahead of the block).  */
3117 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3119 if (PREV_INSN (insn
))
3120 NEXT_INSN (PREV_INSN (insn
)) = next
;
3125 PREV_INSN (next
) = PREV_INSN (insn
);
3131 prev
= get_last_insn ();
3133 /* Now write the CLOBBER of the output, followed by the setting of each
3134 of the words, followed by the final copy. */
3135 if (target
!= op0
&& target
!= op1
)
3136 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3138 for (insn
= insns
; insn
; insn
= next
)
3140 next
= NEXT_INSN (insn
);
/* Tag each word-setting insn with REG_NO_CONFLICT notes for the inputs.  */
3143 if (op1
&& GET_CODE (op1
) == REG
)
3144 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3147 if (op0
&& GET_CODE (op0
) == REG
)
3148 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3152 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3153 != CODE_FOR_nothing
)
3155 last
= emit_move_insn (target
, target
);
3157 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3161 last
= get_last_insn ();
3163 /* Remove any existing REG_EQUAL note from "last", or else it will
3164 be mistaken for a note referring to the full contents of the
3165 alleged libcall value when found together with the REG_RETVAL
3166 note added below. An existing note can come from an insn
3167 expansion at "last". */
3168 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3172 first
= get_insns ();
3174 first
= NEXT_INSN (prev
);
3176 /* Encapsulate the block so it gets manipulated as a unit. */
3177 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3179 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3184 /* Emit code to make a call to a constant function or a library call.
3186 INSNS is a list containing all insns emitted in the call.
3187 These insns leave the result in RESULT. Our block is to copy RESULT
3188 to TARGET, which is logically equivalent to EQUIV.
3190 We first emit any insns that set a pseudo on the assumption that these are
3191 loading constants into registers; doing so allows them to be safely cse'ed
3192 between blocks. Then we emit all the other insns in the block, followed by
3193 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3194 note with an operand of EQUIV.
3196 Moving assignments to pseudos outside of the block is done to improve
3197 the generated code, but is not required to generate correct code,
3198 hence being unable to move an assignment is not grounds for not making
3199 a libcall block. There are two reasons why it is safe to leave these
3200 insns inside the block: First, we know that these pseudos cannot be
3201 used in generated RTL outside the block since they are created for
3202 temporary purposes within the block. Second, CSE will not record the
3203 values of anything set inside a libcall block, so we know they must
3204 be dead at the end of the block.
3206 Except for the first group of insns (the ones setting pseudos), the
3207 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* NOTE(review): extraction elided lines here (parameter declarations,
   note declarations, emit_insn calls, braces); comments only added.  */
3210 emit_libcall_block (insns
, target
, result
, equiv
)
3216 rtx final_dest
= target
;
3217 rtx prev
, next
, first
, last
, insn
;
3219 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3220 into a MEM later. Protect the libcall block from this change. */
3221 if (! REG_P (target
) || REG_USERVAR_P (target
))
3222 target
= gen_reg_rtx (GET_MODE (target
));
3224 /* If we're using non-call exceptions, a libcall corresponding to an
3225 operation that may trap may also trap. */
3226 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3228 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3229 if (GET_CODE (insn
) == CALL_INSN
)
3231 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
/* Drop nothrow markers (region <= 0) so the call may really trap.  */
3233 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3234 remove_note (insn
, note
);
3238 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3239 reg note to indicate that this call cannot throw or execute a nonlocal
3240 goto (unless there is already a REG_EH_REGION note, in which case
3242 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3243 if (GET_CODE (insn
) == CALL_INSN
)
3245 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3248 XEXP (note
, 0) = GEN_INT (-1);
3250 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
3254 /* First emit all insns that set pseudos. Remove them from the list as
3255 we go. Avoid insns that set pseudos which were referenced in previous
3256 insns. These can be generated by move_by_pieces, for example,
3257 to update an address. Similarly, avoid insns that reference things
3258 set in previous insns. */
3260 for (insn
= insns
; insn
; insn
= next
)
3262 rtx set
= single_set (insn
);
3265 /* Some ports (cris) create an libcall regions at their own. We must
3266 avoid any potential nesting of LIBCALLs. */
3267 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3268 remove_note (insn
, note
);
3269 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3270 remove_note (insn
, note
);
3272 next
= NEXT_INSN (insn
);
/* Hoistable: a single SET of a pseudo whose dest/src do not interfere
   with anything earlier in the sequence.  */
3274 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3275 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3277 || ((! INSN_P(insns
)
3278 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3279 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3280 && ! modified_in_p (SET_SRC (set
), insns
)
3281 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3283 if (PREV_INSN (insn
))
3284 NEXT_INSN (PREV_INSN (insn
)) = next
;
3289 PREV_INSN (next
) = PREV_INSN (insn
);
3295 prev
= get_last_insn ();
3297 /* Write the remaining insns followed by the final copy. */
3299 for (insn
= insns
; insn
; insn
= next
)
3301 next
= NEXT_INSN (insn
);
3306 last
= emit_move_insn (target
, result
);
3307 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3308 != CODE_FOR_nothing
)
3309 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3312 /* Remove any existing REG_EQUAL note from "last", or else it will
3313 be mistaken for a note referring to the full contents of the
3314 libcall value when found together with the REG_RETVAL note added
3315 below. An existing note can come from an insn expansion at
3317 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3320 if (final_dest
!= target
)
3321 emit_move_insn (final_dest
, target
);
3324 first
= get_insns ();
3326 first
= NEXT_INSN (prev
);
3328 /* Encapsulate the block so it gets manipulated as a unit. */
3329 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3331 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3332 when the encapsulated region would not be in one basic block,
3333 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3335 bool attach_libcall_retval_notes
= true;
3336 next
= NEXT_INSN (last
);
3337 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3338 if (control_flow_insn_p (insn
))
3340 attach_libcall_retval_notes
= false;
3344 if (attach_libcall_retval_notes
)
3346 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3348 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3354 /* Generate code to store zero in X. */
/* NOTE(review): the function header line was elided by extraction —
   presumably this is emit_clr_insn (x); confirm against upstream optabs.c.  */
3360 emit_move_insn (x
, const0_rtx
);
3363 /* Generate code to store 1 in X
3364 assuming it contains zero beforehand. */
/* NOTE(review): return type / braces elided by extraction; the body is a
   single move of const1_rtx into X.  */
3367 emit_0_to_1_insn (x
)
3370 emit_move_insn (x
, const1_rtx
);
3373 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3374 PURPOSE describes how this comparison will be used. CODE is the rtx
3375 comparison code we will be using.
3377 ??? Actually, CODE is slightly weaker than that. A target is still
3378 required to implement all of the normal bcc operations, but not
3379 required to implement all (or any) of the unordered bcc operations. */
/* NOTE(review): loop delimiters (do { ... }) and return statements were
   elided by extraction; the surviving `while (mode != VOIDmode)` shows the
   body iterates MODE through successively wider modes.  */
3382 can_compare_p (code
, mode
, purpose
)
3384 enum machine_mode mode
;
3385 enum can_compare_purpose purpose
;
/* A cmp pattern suffices if the follow-up (branch or store-flag) for
   CODE also exists.  */
3389 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3391 if (purpose
== ccp_jump
)
3392 return bcc_gen_fctn
[(int) code
] != NULL
;
3393 else if (purpose
== ccp_store_flag
)
3394 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3396 /* There's only one cmov entry point, and it's allowed to fail. */
/* Combined compare-and-branch / compare-and-store patterns also count.  */
3399 if (purpose
== ccp_jump
3400 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3402 if (purpose
== ccp_cmov
3403 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3405 if (purpose
== ccp_store_flag
3406 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3409 mode
= GET_MODE_WIDER_MODE (mode
);
3411 while (mode
!= VOIDmode
);
3416 /* This function is called when we are going to emit a compare instruction that
3417 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3419 *PMODE is the mode of the inputs (in case they are const_int).
3420 *PUNSIGNEDP nonzero says that the operands are unsigned;
3421 this matters if they need to be widened.
3423 If they have mode BLKmode, then SIZE specifies the size of both operands.
3425 This function performs all the setup necessary so that the caller only has
3426 to emit a single comparison insn. This setup can involve doing a BLKmode
3427 comparison or emitting a library call to perform the comparison if no insn
3428 is available to handle it.
3429 The values which are passed in through pointers can be modified; the caller
3430 should perform the comparison on the modified values. */
/* NOTE(review): many original lines were elided by extraction (px/py/size/
   punsignedp declarations, #else/#endif lines, result declaration, abort
   calls, *px/*py write-backs, braces).  Comments only were added below.  */
3433 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3435 enum rtx_code
*pcomparison
;
3437 enum machine_mode
*pmode
;
3439 enum can_compare_purpose purpose
;
3441 enum machine_mode mode
= *pmode
;
3442 rtx x
= *px
, y
= *py
;
3443 int unsignedp
= *punsignedp
;
3444 enum mode_class
class;
3446 class = GET_MODE_CLASS (mode
);
3448 /* They could both be VOIDmode if both args are immediate constants,
3449 but we should fold that at an earlier stage.
3450 With no special code here, this will call abort,
3451 reminding the programmer to implement such folding. */
3453 if (mode
!= BLKmode
&& flag_force_mem
)
3455 x
= force_not_mem (x
);
3456 y
= force_not_mem (y
);
3459 /* If we are inside an appropriately-short loop and one operand is an
3460 expensive constant, force it into a register. */
3461 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3462 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3463 x
= force_reg (mode
, x
);
3465 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3466 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3467 y
= force_reg (mode
, y
);
3470 /* Abort if we have a non-canonical comparison. The RTL documentation
3471 states that canonical comparisons are required only for targets which
3473 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3477 /* Don't let both operands fail to indicate the mode. */
3478 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3479 x
= force_reg (mode
, x
);
3481 /* Handle all BLKmode compares. */
/* BLKmode path: try cmpstrqi/hi/si patterns in turn, then fall back to
   memcmp (or bcmp without TARGET_MEM_FUNCTIONS).  */
3483 if (mode
== BLKmode
)
3486 enum machine_mode result_mode
;
3487 rtx opalign ATTRIBUTE_UNUSED
3488 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3491 x
= protect_from_queue (x
, 0);
3492 y
= protect_from_queue (y
, 0);
3496 #ifdef HAVE_cmpstrqi
3498 && GET_CODE (size
) == CONST_INT
3499 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3501 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3502 result
= gen_reg_rtx (result_mode
);
3503 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3507 #ifdef HAVE_cmpstrhi
3509 && GET_CODE (size
) == CONST_INT
3510 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3512 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3513 result
= gen_reg_rtx (result_mode
);
3514 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3518 #ifdef HAVE_cmpstrsi
3521 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3522 result
= gen_reg_rtx (result_mode
);
3523 size
= protect_from_queue (size
, 0);
3524 emit_insn (gen_cmpstrsi (result
, x
, y
,
3525 convert_to_mode (SImode
, size
, 1),
3531 #ifdef TARGET_MEM_FUNCTIONS
3532 result
= emit_library_call_value (memcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3533 TYPE_MODE (integer_type_node
), 3,
3534 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3535 convert_to_mode (TYPE_MODE (sizetype
), size
,
3536 TREE_UNSIGNED (sizetype
)),
3537 TYPE_MODE (sizetype
));
3539 result
= emit_library_call_value (bcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3540 TYPE_MODE (integer_type_node
), 3,
3541 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3542 convert_to_mode (TYPE_MODE (integer_type_node
),
3544 TREE_UNSIGNED (integer_type_node
)),
3545 TYPE_MODE (integer_type_node
));
3548 result_mode
= TYPE_MODE (integer_type_node
);
3552 *pmode
= result_mode
;
/* Non-BLK path: if the target can compare directly, we are done.  */
3558 if (can_compare_p (*pcomparison
, mode
, purpose
))
3561 /* Handle a lib call just for the mode we are using. */
3563 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3565 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3568 /* If we want unsigned, and this mode has a distinct unsigned
3569 comparison routine, use that. */
3570 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3571 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3573 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3574 word_mode
, 2, x
, mode
, y
, mode
);
3576 /* Integer comparison returns a result that must be compared against 1,
3577 so that even if we do an unsigned compare afterward,
3578 there is still a value that can represent the result "less than". */
3585 if (class == MODE_FLOAT
)
3586 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3592 /* Before emitting an insn with code ICODE, make sure that X, which is going
3593 to be used for operand OPNUM of the insn, is converted from mode MODE to
3594 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3595 that it is accepted by the operand predicate. Return the new value. */
/* NOTE(review): x/opnum/unsignedp declarations, braces, and the final
   `return x;` were elided by extraction; comments only were added.  */
3598 prepare_operand (icode
, x
, opnum
, mode
, wider_mode
, unsignedp
)
3602 enum machine_mode mode
, wider_mode
;
3605 x
= protect_from_queue (x
, 0);
3607 if (mode
!= wider_mode
)
3608 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
/* Copy into a register of the operand's required mode if the pattern's
   predicate rejects X as-is.  */
3610 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3611 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3612 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3616 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3617 we can do the comparison.
3618 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3619 be NULL_RTX which indicates that only a comparison is to be generated. */
/* NOTE(review): loop delimiters (do { ... }), `return` statements after
   each successful emission, and an abort on failure were elided by
   extraction (the trailing `while (wider_mode != VOIDmode)` survives).
   Comments only were added; all original tokens are untouched.  */
3622 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3624 enum machine_mode mode
;
3625 enum rtx_code comparison
;
3629 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3630 enum mode_class
class = GET_MODE_CLASS (mode
);
3631 enum machine_mode wider_mode
= mode
;
3633 /* Try combined insns first. */
3636 enum insn_code icode
;
3637 PUT_MODE (test
, wider_mode
);
/* Preference 1: a combined compare-and-branch (cbranch) pattern.  */
3641 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3643 if (icode
!= CODE_FOR_nothing
3644 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3646 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3647 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3648 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3653 /* Handle some compares against zero. */
/* Preference 2: a test-against-zero (tst) pattern plus a bcc branch.  */
3654 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3655 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3657 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3658 emit_insn (GEN_FCN (icode
) (x
));
3660 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3664 /* Handle compares for which there is a directly suitable insn. */
/* Preference 3: a two-operand cmp pattern plus a bcc branch.  */
3666 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3667 if (icode
!= CODE_FOR_nothing
)
3669 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3670 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3671 emit_insn (GEN_FCN (icode
) (x
, y
))
;
3673 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
/* Only scalar int/float and complex-float modes may be widened further.  */
3677 if (class != MODE_INT
&& class != MODE_FLOAT
3678 && class != MODE_COMPLEX_FLOAT
)
3681 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3683 while (wider_mode
!= VOIDmode
);
3688 /* Generate code to compare X with Y so that the condition codes are
3689 set and to jump to LABEL if the condition is true. If X is a
3690 constant and Y is not a constant, then the comparison is swapped to
3691 ensure that the comparison RTL has the canonical form.
3693 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3694 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3695 the proper branch condition code.
3697 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3699 MODE is the mode of the inputs (in case they are const_int).
3701 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3702 be passed unchanged to emit_cmp_insn, then potentially converted into an
3703 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3706 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3708 enum rtx_code comparison
;
3710 enum machine_mode mode
;
3714 rtx op0
= x
, op1
= y
;
3716 /* Swap operands and condition to ensure canonical RTL. */
3717 if (swap_commutative_operands_p (x
, y
))
3719 /* If we're not emitting a branch, this means some caller
3725 comparison
= swap_condition (comparison
);
3729 /* If OP0 is still a constant, then both X and Y must be constants. Force
3730 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3732 if (CONSTANT_P (op0
))
3733 op0
= force_reg (mode
, op0
);
3738 comparison
= unsigned_condition (comparison
);
3740 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3742 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3745 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3748 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3750 enum rtx_code comparison
;
3752 enum machine_mode mode
;
3755 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3758 /* Emit a library call comparison between floating point X and Y.
3759 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3762 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3764 enum rtx_code
*pcomparison
;
3765 enum machine_mode
*pmode
;
3768 enum rtx_code comparison
= *pcomparison
;
3770 rtx x
= *px
= protect_from_queue (*px
, 0);
3771 rtx y
= *py
= protect_from_queue (*py
, 0);
3772 enum machine_mode mode
= GET_MODE (x
);
3780 libfunc
= eqhf2_libfunc
;
3784 libfunc
= nehf2_libfunc
;
3788 libfunc
= gthf2_libfunc
;
3789 if (libfunc
== NULL_RTX
)
3791 tmp
= x
; x
= y
; y
= tmp
;
3793 libfunc
= lthf2_libfunc
;
3798 libfunc
= gehf2_libfunc
;
3799 if (libfunc
== NULL_RTX
)
3801 tmp
= x
; x
= y
; y
= tmp
;
3803 libfunc
= lehf2_libfunc
;
3808 libfunc
= lthf2_libfunc
;
3809 if (libfunc
== NULL_RTX
)
3811 tmp
= x
; x
= y
; y
= tmp
;
3813 libfunc
= gthf2_libfunc
;
3818 libfunc
= lehf2_libfunc
;
3819 if (libfunc
== NULL_RTX
)
3821 tmp
= x
; x
= y
; y
= tmp
;
3823 libfunc
= gehf2_libfunc
;
3828 libfunc
= unordhf2_libfunc
;
3834 else if (mode
== SFmode
)
3838 libfunc
= eqsf2_libfunc
;
3842 libfunc
= nesf2_libfunc
;
3846 libfunc
= gtsf2_libfunc
;
3847 if (libfunc
== NULL_RTX
)
3849 tmp
= x
; x
= y
; y
= tmp
;
3851 libfunc
= ltsf2_libfunc
;
3856 libfunc
= gesf2_libfunc
;
3857 if (libfunc
== NULL_RTX
)
3859 tmp
= x
; x
= y
; y
= tmp
;
3861 libfunc
= lesf2_libfunc
;
3866 libfunc
= ltsf2_libfunc
;
3867 if (libfunc
== NULL_RTX
)
3869 tmp
= x
; x
= y
; y
= tmp
;
3871 libfunc
= gtsf2_libfunc
;
3876 libfunc
= lesf2_libfunc
;
3877 if (libfunc
== NULL_RTX
)
3879 tmp
= x
; x
= y
; y
= tmp
;
3881 libfunc
= gesf2_libfunc
;
3886 libfunc
= unordsf2_libfunc
;
3892 else if (mode
== DFmode
)
3896 libfunc
= eqdf2_libfunc
;
3900 libfunc
= nedf2_libfunc
;
3904 libfunc
= gtdf2_libfunc
;
3905 if (libfunc
== NULL_RTX
)
3907 tmp
= x
; x
= y
; y
= tmp
;
3909 libfunc
= ltdf2_libfunc
;
3914 libfunc
= gedf2_libfunc
;
3915 if (libfunc
== NULL_RTX
)
3917 tmp
= x
; x
= y
; y
= tmp
;
3919 libfunc
= ledf2_libfunc
;
3924 libfunc
= ltdf2_libfunc
;
3925 if (libfunc
== NULL_RTX
)
3927 tmp
= x
; x
= y
; y
= tmp
;
3929 libfunc
= gtdf2_libfunc
;
3934 libfunc
= ledf2_libfunc
;
3935 if (libfunc
== NULL_RTX
)
3937 tmp
= x
; x
= y
; y
= tmp
;
3939 libfunc
= gedf2_libfunc
;
3944 libfunc
= unorddf2_libfunc
;
3950 else if (mode
== XFmode
)
3954 libfunc
= eqxf2_libfunc
;
3958 libfunc
= nexf2_libfunc
;
3962 libfunc
= gtxf2_libfunc
;
3963 if (libfunc
== NULL_RTX
)
3965 tmp
= x
; x
= y
; y
= tmp
;
3967 libfunc
= ltxf2_libfunc
;
3972 libfunc
= gexf2_libfunc
;
3973 if (libfunc
== NULL_RTX
)
3975 tmp
= x
; x
= y
; y
= tmp
;
3977 libfunc
= lexf2_libfunc
;
3982 libfunc
= ltxf2_libfunc
;
3983 if (libfunc
== NULL_RTX
)
3985 tmp
= x
; x
= y
; y
= tmp
;
3987 libfunc
= gtxf2_libfunc
;
3992 libfunc
= lexf2_libfunc
;
3993 if (libfunc
== NULL_RTX
)
3995 tmp
= x
; x
= y
; y
= tmp
;
3997 libfunc
= gexf2_libfunc
;
4002 libfunc
= unordxf2_libfunc
;
4008 else if (mode
== TFmode
)
4012 libfunc
= eqtf2_libfunc
;
4016 libfunc
= netf2_libfunc
;
4020 libfunc
= gttf2_libfunc
;
4021 if (libfunc
== NULL_RTX
)
4023 tmp
= x
; x
= y
; y
= tmp
;
4025 libfunc
= lttf2_libfunc
;
4030 libfunc
= getf2_libfunc
;
4031 if (libfunc
== NULL_RTX
)
4033 tmp
= x
; x
= y
; y
= tmp
;
4035 libfunc
= letf2_libfunc
;
4040 libfunc
= lttf2_libfunc
;
4041 if (libfunc
== NULL_RTX
)
4043 tmp
= x
; x
= y
; y
= tmp
;
4045 libfunc
= gttf2_libfunc
;
4050 libfunc
= letf2_libfunc
;
4051 if (libfunc
== NULL_RTX
)
4053 tmp
= x
; x
= y
; y
= tmp
;
4055 libfunc
= getf2_libfunc
;
4060 libfunc
= unordtf2_libfunc
;
4068 enum machine_mode wider_mode
;
4070 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
4071 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
4073 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
4074 != CODE_FOR_nothing
)
4075 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
4077 x
= protect_from_queue (x
, 0);
4078 y
= protect_from_queue (y
, 0);
4079 *px
= convert_to_mode (wider_mode
, x
, 0);
4080 *py
= convert_to_mode (wider_mode
, y
, 0);
4081 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
4091 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
4092 word_mode
, 2, x
, mode
, y
, mode
);
4096 if (comparison
== UNORDERED
)
4098 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
4099 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4105 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4108 emit_indirect_jump (loc
)
4111 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
4113 loc
= copy_to_mode_reg (Pmode
, loc
);
4115 emit_jump_insn (gen_indirect_jump (loc
));
4119 #ifdef HAVE_conditional_move
4121 /* Emit a conditional move instruction if the machine supports one for that
4122 condition and machine mode.
4124 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4125 the mode to use should they be constants. If it is VOIDmode, they cannot
4128 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4129 should be stored there. MODE is the mode to use should they be constants.
4130 If it is VOIDmode, they cannot both be constants.
4132 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4133 is not supported. */
4136 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
4141 enum machine_mode cmode
;
4143 enum machine_mode mode
;
4146 rtx tem
, subtarget
, comparison
, insn
;
4147 enum insn_code icode
;
4148 enum rtx_code reversed
;
4150 /* If one operand is constant, make it the second one. Only do this
4151 if the other operand is not constant as well. */
4153 if (swap_commutative_operands_p (op0
, op1
))
4158 code
= swap_condition (code
);
4161 /* get_condition will prefer to generate LT and GT even if the old
4162 comparison was against zero, so undo that canonicalization here since
4163 comparisons against zero are cheaper. */
4164 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4165 code
= LE
, op1
= const0_rtx
;
4166 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4167 code
= GE
, op1
= const0_rtx
;
4169 if (cmode
== VOIDmode
)
4170 cmode
= GET_MODE (op0
);
4172 if (swap_commutative_operands_p (op2
, op3
)
4173 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4182 if (mode
== VOIDmode
)
4183 mode
= GET_MODE (op2
);
4185 icode
= movcc_gen_code
[mode
];
4187 if (icode
== CODE_FOR_nothing
)
4192 op2
= force_not_mem (op2
);
4193 op3
= force_not_mem (op3
);
4197 target
= protect_from_queue (target
, 1);
4199 target
= gen_reg_rtx (mode
);
4205 op2
= protect_from_queue (op2
, 0);
4206 op3
= protect_from_queue (op3
, 0);
4208 /* If the insn doesn't accept these operands, put them in pseudos. */
4210 if (! (*insn_data
[icode
].operand
[0].predicate
)
4211 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4212 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4214 if (! (*insn_data
[icode
].operand
[2].predicate
)
4215 (op2
, insn_data
[icode
].operand
[2].mode
))
4216 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4218 if (! (*insn_data
[icode
].operand
[3].predicate
)
4219 (op3
, insn_data
[icode
].operand
[3].mode
))
4220 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4222 /* Everything should now be in the suitable form, so emit the compare insn
4223 and then the conditional move. */
4226 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4228 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4229 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4230 return NULL and let the caller figure out how best to deal with this
4232 if (GET_CODE (comparison
) != code
)
4235 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4237 /* If that failed, then give up. */
4243 if (subtarget
!= target
)
4244 convert_move (target
, subtarget
, 0);
4249 /* Return nonzero if a conditional move of mode MODE is supported.
4251 This function is for combine so it can tell whether an insn that looks
4252 like a conditional move is actually supported by the hardware. If we
4253 guess wrong we lose a bit on optimization, but that's it. */
4254 /* ??? sparc64 supports conditionally moving integers values based on fp
4255 comparisons, and vice versa. How do we handle them? */
4258 can_conditionally_move_p (mode
)
4259 enum machine_mode mode
;
4261 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4267 #endif /* HAVE_conditional_move */
4269 /* Emit a conditional addition instruction if the machine supports one for that
4270 condition and machine mode.
4272 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4273 the mode to use should they be constants. If it is VOIDmode, they cannot
4276 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4277 should be stored there. MODE is the mode to use should they be constants.
4278 If it is VOIDmode, they cannot both be constants.
4280 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4281 is not supported. */
4284 emit_conditional_add (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
4289 enum machine_mode cmode
;
4291 enum machine_mode mode
;
4294 rtx tem
, subtarget
, comparison
, insn
;
4295 enum insn_code icode
;
4296 enum rtx_code reversed
;
4298 /* If one operand is constant, make it the second one. Only do this
4299 if the other operand is not constant as well. */
4301 if (swap_commutative_operands_p (op0
, op1
))
4306 code
= swap_condition (code
);
4309 /* get_condition will prefer to generate LT and GT even if the old
4310 comparison was against zero, so undo that canonicalization here since
4311 comparisons against zero are cheaper. */
4312 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4313 code
= LE
, op1
= const0_rtx
;
4314 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4315 code
= GE
, op1
= const0_rtx
;
4317 if (cmode
== VOIDmode
)
4318 cmode
= GET_MODE (op0
);
4320 if (swap_commutative_operands_p (op2
, op3
)
4321 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4330 if (mode
== VOIDmode
)
4331 mode
= GET_MODE (op2
);
4333 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4335 if (icode
== CODE_FOR_nothing
)
4340 op2
= force_not_mem (op2
);
4341 op3
= force_not_mem (op3
);
4345 target
= protect_from_queue (target
, 1);
4347 target
= gen_reg_rtx (mode
);
4353 op2
= protect_from_queue (op2
, 0);
4354 op3
= protect_from_queue (op3
, 0);
4356 /* If the insn doesn't accept these operands, put them in pseudos. */
4358 if (! (*insn_data
[icode
].operand
[0].predicate
)
4359 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4360 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4362 if (! (*insn_data
[icode
].operand
[2].predicate
)
4363 (op2
, insn_data
[icode
].operand
[2].mode
))
4364 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4366 if (! (*insn_data
[icode
].operand
[3].predicate
)
4367 (op3
, insn_data
[icode
].operand
[3].mode
))
4368 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4370 /* Everything should now be in the suitable form, so emit the compare insn
4371 and then the conditional move. */
4374 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4376 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4377 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4378 return NULL and let the caller figure out how best to deal with this
4380 if (GET_CODE (comparison
) != code
)
4383 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4385 /* If that failed, then give up. */
4391 if (subtarget
!= target
)
4392 convert_move (target
, subtarget
, 0);
4397 /* These functions generate an insn body and return it
4398 rather than emitting the insn.
4400 They do not protect from queued increments,
4401 because they may be used 1) in protect_from_queue itself
4402 and 2) in other passes where there is no queue. */
4404 /* Generate and return an insn body to add Y to X. */
4407 gen_add2_insn (x
, y
)
4410 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4412 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4413 (x
, insn_data
[icode
].operand
[0].mode
))
4414 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4415 (x
, insn_data
[icode
].operand
[1].mode
))
4416 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4417 (y
, insn_data
[icode
].operand
[2].mode
)))
4420 return (GEN_FCN (icode
) (x
, x
, y
));
4423 /* Generate and return an insn body to add r1 and c,
4424 storing the result in r0. */
4426 gen_add3_insn (r0
, r1
, c
)
4429 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4431 if (icode
== CODE_FOR_nothing
4432 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4433 (r0
, insn_data
[icode
].operand
[0].mode
))
4434 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4435 (r1
, insn_data
[icode
].operand
[1].mode
))
4436 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4437 (c
, insn_data
[icode
].operand
[2].mode
)))
4440 return (GEN_FCN (icode
) (r0
, r1
, c
));
4444 have_add2_insn (x
, y
)
4449 if (GET_MODE (x
) == VOIDmode
)
4452 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4454 if (icode
== CODE_FOR_nothing
)
4457 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4458 (x
, insn_data
[icode
].operand
[0].mode
))
4459 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4460 (x
, insn_data
[icode
].operand
[1].mode
))
4461 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4462 (y
, insn_data
[icode
].operand
[2].mode
)))
4468 /* Generate and return an insn body to subtract Y from X. */
4471 gen_sub2_insn (x
, y
)
4474 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4476 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4477 (x
, insn_data
[icode
].operand
[0].mode
))
4478 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4479 (x
, insn_data
[icode
].operand
[1].mode
))
4480 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4481 (y
, insn_data
[icode
].operand
[2].mode
)))
4484 return (GEN_FCN (icode
) (x
, x
, y
));
4487 /* Generate and return an insn body to subtract r1 and c,
4488 storing the result in r0. */
4490 gen_sub3_insn (r0
, r1
, c
)
4493 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4495 if (icode
== CODE_FOR_nothing
4496 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4497 (r0
, insn_data
[icode
].operand
[0].mode
))
4498 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4499 (r1
, insn_data
[icode
].operand
[1].mode
))
4500 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4501 (c
, insn_data
[icode
].operand
[2].mode
)))
4504 return (GEN_FCN (icode
) (r0
, r1
, c
));
4508 have_sub2_insn (x
, y
)
4513 if (GET_MODE (x
) == VOIDmode
)
4516 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4518 if (icode
== CODE_FOR_nothing
)
4521 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4522 (x
, insn_data
[icode
].operand
[0].mode
))
4523 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4524 (x
, insn_data
[icode
].operand
[1].mode
))
4525 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4526 (y
, insn_data
[icode
].operand
[2].mode
)))
4532 /* Generate the body of an instruction to copy Y into X.
4533 It may be a list of insns, if one insn isn't enough. */
4536 gen_move_insn (x
, y
)
4539 enum machine_mode mode
= GET_MODE (x
);
4540 enum insn_code insn_code
;
4543 if (mode
== VOIDmode
)
4544 mode
= GET_MODE (y
);
4546 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
4548 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
4549 find a mode to do it in. If we have a movcc, use it. Otherwise,
4550 find the MODE_INT mode of the same width. */
4552 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
4554 enum machine_mode tmode
= VOIDmode
;
4558 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
4561 for (tmode
= QImode
; tmode
!= VOIDmode
;
4562 tmode
= GET_MODE_WIDER_MODE (tmode
))
4563 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
4566 if (tmode
== VOIDmode
)
4569 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
4570 may call change_address which is not appropriate if we were
4571 called when a reload was in progress. We don't have to worry
4572 about changing the address since the size in bytes is supposed to
4573 be the same. Copy the MEM to change the mode and move any
4574 substitutions from the old MEM to the new one. */
4576 if (reload_in_progress
)
4578 x
= gen_lowpart_common (tmode
, x1
);
4579 if (x
== 0 && GET_CODE (x1
) == MEM
)
4581 x
= adjust_address_nv (x1
, tmode
, 0);
4582 copy_replacements (x1
, x
);
4585 y
= gen_lowpart_common (tmode
, y1
);
4586 if (y
== 0 && GET_CODE (y1
) == MEM
)
4588 y
= adjust_address_nv (y1
, tmode
, 0);
4589 copy_replacements (y1
, y
);
4594 x
= gen_lowpart (tmode
, x
);
4595 y
= gen_lowpart (tmode
, y
);
4598 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
4599 return (GEN_FCN (insn_code
) (x
, y
));
4603 emit_move_insn_1 (x
, y
);
4609 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4610 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4611 no such operation exists, CODE_FOR_nothing will be returned. */
4614 can_extend_p (to_mode
, from_mode
, unsignedp
)
4615 enum machine_mode to_mode
, from_mode
;
4618 #ifdef HAVE_ptr_extend
4620 return CODE_FOR_ptr_extend
;
4623 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
!= 0];
4626 /* Generate the body of an insn to extend Y (with mode MFROM)
4627 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4630 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
4632 enum machine_mode mto
, mfrom
;
4635 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
!= 0]) (x
, y
));
4638 /* can_fix_p and can_float_p say whether the target machine
4639 can directly convert a given fixed point type to
4640 a given floating point type, or vice versa.
4641 The returned value is the CODE_FOR_... value to use,
4642 or CODE_FOR_nothing if these modes cannot be directly converted.
4644 *TRUNCP_PTR is set to 1 if it is necessary to output
4645 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4647 static enum insn_code
4648 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
4649 enum machine_mode fltmode
, fixmode
;
4654 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0]
4655 != CODE_FOR_nothing
)
4656 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4658 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
4661 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4663 return CODE_FOR_nothing
;
4666 static enum insn_code
4667 can_float_p (fltmode
, fixmode
, unsignedp
)
4668 enum machine_mode fixmode
, fltmode
;
4671 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
!= 0];
4674 /* Generate code to convert FROM to floating point
4675 and store in TO. FROM must be fixed point and not VOIDmode.
4676 UNSIGNEDP nonzero means regard FROM as unsigned.
4677 Normally this is done by correcting the final value
4678 if it is negative. */
4681 expand_float (to
, from
, unsignedp
)
4685 enum insn_code icode
;
4687 enum machine_mode fmode
, imode
;
4689 /* Crash now, because we won't be able to decide which mode to use. */
4690 if (GET_MODE (from
) == VOIDmode
)
4693 /* Look for an insn to do the conversion. Do it in the specified
4694 modes if possible; otherwise convert either input, output or both to
4695 wider mode. If the integer mode is wider than the mode of FROM,
4696 we can do the conversion signed even if the input is unsigned. */
4698 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4699 imode
= GET_MODE_WIDER_MODE (imode
))
4700 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4701 fmode
= GET_MODE_WIDER_MODE (fmode
))
4703 int doing_unsigned
= unsignedp
;
4705 if (fmode
!= GET_MODE (to
)
4706 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4709 icode
= can_float_p (fmode
, imode
, unsignedp
);
4710 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4711 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4713 if (icode
!= CODE_FOR_nothing
)
4715 to
= protect_from_queue (to
, 1);
4716 from
= protect_from_queue (from
, 0);
4718 if (imode
!= GET_MODE (from
))
4719 from
= convert_to_mode (imode
, from
, unsignedp
);
4721 if (fmode
!= GET_MODE (to
))
4722 target
= gen_reg_rtx (fmode
);
4724 emit_unop_insn (icode
, target
, from
,
4725 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4728 convert_move (to
, target
, 0);
4733 /* Unsigned integer, and no way to convert directly.
4734 Convert as signed, then conditionally adjust the result. */
4737 rtx label
= gen_label_rtx ();
4739 REAL_VALUE_TYPE offset
;
4743 to
= protect_from_queue (to
, 1);
4744 from
= protect_from_queue (from
, 0);
4747 from
= force_not_mem (from
);
4749 /* Look for a usable floating mode FMODE wider than the source and at
4750 least as wide as the target. Using FMODE will avoid rounding woes
4751 with unsigned values greater than the signed maximum value. */
4753 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4754 fmode
= GET_MODE_WIDER_MODE (fmode
))
4755 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4756 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4759 if (fmode
== VOIDmode
)
4761 /* There is no such mode. Pretend the target is wide enough. */
4762 fmode
= GET_MODE (to
);
4764 /* Avoid double-rounding when TO is narrower than FROM. */
4765 if ((significand_size (fmode
) + 1)
4766 < GET_MODE_BITSIZE (GET_MODE (from
)))
4769 rtx neglabel
= gen_label_rtx ();
4771 /* Don't use TARGET if it isn't a register, is a hard register,
4772 or is the wrong mode. */
4773 if (GET_CODE (target
) != REG
4774 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4775 || GET_MODE (target
) != fmode
)
4776 target
= gen_reg_rtx (fmode
);
4778 imode
= GET_MODE (from
);
4779 do_pending_stack_adjust ();
4781 /* Test whether the sign bit is set. */
4782 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4785 /* The sign bit is not set. Convert as signed. */
4786 expand_float (target
, from
, 0);
4787 emit_jump_insn (gen_jump (label
));
4790 /* The sign bit is set.
4791 Convert to a usable (positive signed) value by shifting right
4792 one bit, while remembering if a nonzero bit was shifted
4793 out; i.e., compute (from & 1) | (from >> 1). */
4795 emit_label (neglabel
);
4796 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4797 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4798 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4800 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4802 expand_float (target
, temp
, 0);
4804 /* Multiply by 2 to undo the shift above. */
4805 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4806 target
, 0, OPTAB_LIB_WIDEN
);
4808 emit_move_insn (target
, temp
);
4810 do_pending_stack_adjust ();
4816 /* If we are about to do some arithmetic to correct for an
4817 unsigned operand, do it in a pseudo-register. */
4819 if (GET_MODE (to
) != fmode
4820 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4821 target
= gen_reg_rtx (fmode
);
4823 /* Convert as signed integer to floating. */
4824 expand_float (target
, from
, 0);
4826 /* If FROM is negative (and therefore TO is negative),
4827 correct its value by 2**bitwidth. */
4829 do_pending_stack_adjust ();
4830 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4834 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4835 temp
= expand_binop (fmode
, add_optab
, target
,
4836 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4837 target
, 0, OPTAB_LIB_WIDEN
);
4839 emit_move_insn (target
, temp
);
4841 do_pending_stack_adjust ();
4846 /* No hardware instruction available; call a library routine to convert from
4847 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4853 to
= protect_from_queue (to
, 1);
4854 from
= protect_from_queue (from
, 0);
4856 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4857 from
= convert_to_mode (SImode
, from
, unsignedp
);
4860 from
= force_not_mem (from
);
4862 if (GET_MODE (to
) == SFmode
)
4864 if (GET_MODE (from
) == SImode
)
4865 libfcn
= floatsisf_libfunc
;
4866 else if (GET_MODE (from
) == DImode
)
4867 libfcn
= floatdisf_libfunc
;
4868 else if (GET_MODE (from
) == TImode
)
4869 libfcn
= floattisf_libfunc
;
4873 else if (GET_MODE (to
) == DFmode
)
4875 if (GET_MODE (from
) == SImode
)
4876 libfcn
= floatsidf_libfunc
;
4877 else if (GET_MODE (from
) == DImode
)
4878 libfcn
= floatdidf_libfunc
;
4879 else if (GET_MODE (from
) == TImode
)
4880 libfcn
= floattidf_libfunc
;
4884 else if (GET_MODE (to
) == XFmode
)
4886 if (GET_MODE (from
) == SImode
)
4887 libfcn
= floatsixf_libfunc
;
4888 else if (GET_MODE (from
) == DImode
)
4889 libfcn
= floatdixf_libfunc
;
4890 else if (GET_MODE (from
) == TImode
)
4891 libfcn
= floattixf_libfunc
;
4895 else if (GET_MODE (to
) == TFmode
)
4897 if (GET_MODE (from
) == SImode
)
4898 libfcn
= floatsitf_libfunc
;
4899 else if (GET_MODE (from
) == DImode
)
4900 libfcn
= floatditf_libfunc
;
4901 else if (GET_MODE (from
) == TImode
)
4902 libfcn
= floattitf_libfunc
;
4911 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
4912 GET_MODE (to
), 1, from
,
4914 insns
= get_insns ();
4917 emit_libcall_block (insns
, target
, value
,
4918 gen_rtx_FLOAT (GET_MODE (to
), from
));
4923 /* Copy result to requested destination
4924 if we have been computing in a temp location. */
4928 if (GET_MODE (target
) == GET_MODE (to
))
4929 emit_move_insn (to
, target
);
4931 convert_move (to
, target
, 0);
4935 /* expand_fix: generate code to convert FROM to fixed point
4936 and store in TO. FROM must be floating point. */
4942 rtx temp
= gen_reg_rtx (GET_MODE (x
));
4943 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
4947 expand_fix (to
, from
, unsignedp
)
4951 enum insn_code icode
;
4953 enum machine_mode fmode
, imode
;
4957 /* We first try to find a pair of modes, one real and one integer, at
4958 least as wide as FROM and TO, respectively, in which we can open-code
4959 this conversion. If the integer mode is wider than the mode of TO,
4960 we can do the conversion either signed or unsigned. */
4962 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4963 fmode
= GET_MODE_WIDER_MODE (fmode
))
4964 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4965 imode
= GET_MODE_WIDER_MODE (imode
))
4967 int doing_unsigned
= unsignedp
;
4969 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4970 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4971 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4973 if (icode
!= CODE_FOR_nothing
)
4975 to
= protect_from_queue (to
, 1);
4976 from
= protect_from_queue (from
, 0);
4978 if (fmode
!= GET_MODE (from
))
4979 from
= convert_to_mode (fmode
, from
, 0);
4982 from
= ftruncify (from
);
4984 if (imode
!= GET_MODE (to
))
4985 target
= gen_reg_rtx (imode
);
4987 emit_unop_insn (icode
, target
, from
,
4988 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4990 convert_move (to
, target
, unsignedp
);
4995 /* For an unsigned conversion, there is one more way to do it.
4996 If we have a signed conversion, we generate code that compares
4997 the real value to the largest representable positive number. If if
4998 is smaller, the conversion is done normally. Otherwise, subtract
4999 one plus the highest signed number, convert, and add it back.
5001 We only need to check all real modes, since we know we didn't find
5002 anything with a wider integer mode. */
5004 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
5005 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5006 fmode
= GET_MODE_WIDER_MODE (fmode
))
5007 /* Make sure we won't lose significant bits doing this. */
5008 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
5009 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
5013 REAL_VALUE_TYPE offset
;
5014 rtx limit
, lab1
, lab2
, insn
;
5016 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
5017 real_2expN (&offset
, bitsize
- 1);
5018 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
5019 lab1
= gen_label_rtx ();
5020 lab2
= gen_label_rtx ();
5023 to
= protect_from_queue (to
, 1);
5024 from
= protect_from_queue (from
, 0);
5027 from
= force_not_mem (from
);
5029 if (fmode
!= GET_MODE (from
))
5030 from
= convert_to_mode (fmode
, from
, 0);
5032 /* See if we need to do the subtraction. */
5033 do_pending_stack_adjust ();
5034 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
5037 /* If not, do the signed "fix" and branch around fixup code. */
5038 expand_fix (to
, from
, 0);
5039 emit_jump_insn (gen_jump (lab2
));
5042 /* Otherwise, subtract 2**(N-1), convert to signed number,
5043 then add 2**(N-1). Do the addition using XOR since this
5044 will often generate better code. */
5046 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5047 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5048 expand_fix (to
, target
, 0);
5049 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
5051 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
5053 to
, 1, OPTAB_LIB_WIDEN
);
5056 emit_move_insn (to
, target
);
5060 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
5061 != CODE_FOR_nothing
)
5063 /* Make a place for a REG_NOTE and add it. */
5064 insn
= emit_move_insn (to
, to
);
5065 set_unique_reg_note (insn
,
5067 gen_rtx_fmt_e (UNSIGNED_FIX
,
5075 /* We can't do it with an insn, so use a library call. But first ensure
5076 that the mode of TO is at least as wide as SImode, since those are the
5077 only library calls we know about. */
5079 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
5081 target
= gen_reg_rtx (SImode
);
5083 expand_fix (target
, from
, unsignedp
);
5085 else if (GET_MODE (from
) == SFmode
)
5087 if (GET_MODE (to
) == SImode
)
5088 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
5089 else if (GET_MODE (to
) == DImode
)
5090 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
5091 else if (GET_MODE (to
) == TImode
)
5092 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
5096 else if (GET_MODE (from
) == DFmode
)
5098 if (GET_MODE (to
) == SImode
)
5099 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
5100 else if (GET_MODE (to
) == DImode
)
5101 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
5102 else if (GET_MODE (to
) == TImode
)
5103 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
5107 else if (GET_MODE (from
) == XFmode
)
5109 if (GET_MODE (to
) == SImode
)
5110 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
5111 else if (GET_MODE (to
) == DImode
)
5112 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
5113 else if (GET_MODE (to
) == TImode
)
5114 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
5118 else if (GET_MODE (from
) == TFmode
)
5120 if (GET_MODE (to
) == SImode
)
5121 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
5122 else if (GET_MODE (to
) == DImode
)
5123 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
5124 else if (GET_MODE (to
) == TImode
)
5125 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
5137 to
= protect_from_queue (to
, 1);
5138 from
= protect_from_queue (from
, 0);
5141 from
= force_not_mem (from
);
5145 value
= emit_library_call_value (libfcn
, NULL_RTX
, LCT_CONST
,
5146 GET_MODE (to
), 1, from
,
5148 insns
= get_insns ();
5151 emit_libcall_block (insns
, target
, value
,
5152 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5153 GET_MODE (to
), from
));
5158 if (GET_MODE (to
) == GET_MODE (target
))
5159 emit_move_insn (to
, target
);
5161 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (code, mode)
     enum rtx_code code;
     enum machine_mode mode;
{
  /* True only when an optab is registered for CODE and its handler for
     MODE names a real insn pattern rather than the CODE_FOR_nothing
     placeholder installed by new_optab.  */
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
/* Create a blank optab.  */

static optab
new_optab ()
{
  int i;
  optab op = (optab) ggc_alloc (sizeof (struct optab));

  /* Mark every machine mode as unsupported: no insn pattern and no
     library-call fallback until init_optabs fills the entries in.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static optab
init_optab (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  /* Record the rtx-code -> optab mapping consulted by have_insn_for.  */
  code_to_optab[(int) code] = op;
  return op;
}
/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  Used for the trapping-arithmetic variants
   (addv, subv, ...) which share their rtx code with the plain optab.  */

static optab
init_optabv (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lower case) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optable, first_mode, last_mode, opname, suffix)
     optab optable;
     int first_mode;
     int last_mode;
     const char *opname;
     int suffix;
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      /* Buffer layout: "__" + OPNAME + lowercased mode name + SUFFIX
	 character + terminating NUL.  */
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
	*p++ = *q++;
      /* Mode names are upper case (e.g. "SI"); libfunc names use the
	 lower-cased form (e.g. "__addsi3").  */
      for (q = mname; *q; q++)
	*p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
	= gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (libfunc_name,
						       p - libfunc_name));
    }
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  /* Integer library calls exist for the modes SImode through TImode.  */
  init_libfuncs (optable, SImode, TImode, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  /* Floating-point library calls exist for SFmode through TFmode.  */
  init_libfuncs (optable, SFmode, TFmode, opname, suffix);
}
/* Build a FUNCTION_DECL for the library function called NAME and return
   the SYMBOL_REF rtx that refers to it.  */

rtx
init_one_libfunc (name)
     const char *name;
{
  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Return the symbol_ref from the mem rtx.  */
  return XEXP (DECL_RTL (decl), 0);
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs ()
{
  unsigned int i, j, k;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < ARRAY_SIZE (fixtab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
	fixtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
	fixtrunctab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (floattab); i++)
    for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
	floattab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (extendtab); i++)
    for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
	extendtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  /* Allocate one optab per generic operation.  Operations with a real
     rtx code get it recorded (so have_insn_for works); the rest use
     UNKNOWN.  The *v variants are the trapping-on-overflow forms and
     deliberately do not enter the code_to_optab table.  */
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  trunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
  /* This flag says the same insns that convert to a signed fixnum
     also convert validly to an unsigned one.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
#endif

  /* Initialize the optabs with the names of the library functions.
     NOTE(review): for the trapping variants, the *floating* entries
     reuse the plain libfunc names (addv -> "add", etc.), as in the
     extracted original — presumably because FP arithmetic does not
     trap on overflow.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* Targets may override individual libcall names via these macros.  */
#ifdef MULSI3_LIBCALL
  smul_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MULSI3_LIBCALL);
#endif
#ifdef MULDI3_LIBCALL
  smul_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MULDI3_LIBCALL);
#endif

#ifdef DIVSI3_LIBCALL
  sdiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (DIVSI3_LIBCALL);
#endif
#ifdef DIVDI3_LIBCALL
  sdiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (DIVDI3_LIBCALL);
#endif

#ifdef UDIVSI3_LIBCALL
  udiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UDIVSI3_LIBCALL);
#endif
#ifdef UDIVDI3_LIBCALL
  udiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UDIVDI3_LIBCALL);
#endif

#ifdef MODSI3_LIBCALL
  smod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MODSI3_LIBCALL);
#endif
#ifdef MODDI3_LIBCALL
  smod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MODDI3_LIBCALL);
#endif

#ifdef UMODSI3_LIBCALL
  umod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UMODSI3_LIBCALL);
#endif
#ifdef UMODDI3_LIBCALL
  umod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UMODDI3_LIBCALL);
#endif

  /* Use cabs for DC complex abs, since systems generally have cabs.
     Don't define any libcall for SCmode, so that cabs will be used.  */
  abs_optab->handlers[(int) DCmode].libfunc
    = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  /* Floating-point widening and narrowing conversions.  */
  extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
  extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
  extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
  extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
  extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");

  truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
  truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
  trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
  truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
  trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");

  /* Block-memory and miscellaneous support routines.  */
  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");

  /* Exception-handling support; name depends on the unwinder in use.  */
  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
					    ? "_Unwind_SjLj_Resume"
					    : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* Floating-point comparison libcalls, one family per FP mode.  */
  eqhf2_libfunc = init_one_libfunc ("__eqhf2");
  nehf2_libfunc = init_one_libfunc ("__nehf2");
  gthf2_libfunc = init_one_libfunc ("__gthf2");
  gehf2_libfunc = init_one_libfunc ("__gehf2");
  lthf2_libfunc = init_one_libfunc ("__lthf2");
  lehf2_libfunc = init_one_libfunc ("__lehf2");
  unordhf2_libfunc = init_one_libfunc ("__unordhf2");

  eqsf2_libfunc = init_one_libfunc ("__eqsf2");
  nesf2_libfunc = init_one_libfunc ("__nesf2");
  gtsf2_libfunc = init_one_libfunc ("__gtsf2");
  gesf2_libfunc = init_one_libfunc ("__gesf2");
  ltsf2_libfunc = init_one_libfunc ("__ltsf2");
  lesf2_libfunc = init_one_libfunc ("__lesf2");
  unordsf2_libfunc = init_one_libfunc ("__unordsf2");

  eqdf2_libfunc = init_one_libfunc ("__eqdf2");
  nedf2_libfunc = init_one_libfunc ("__nedf2");
  gtdf2_libfunc = init_one_libfunc ("__gtdf2");
  gedf2_libfunc = init_one_libfunc ("__gedf2");
  ltdf2_libfunc = init_one_libfunc ("__ltdf2");
  ledf2_libfunc = init_one_libfunc ("__ledf2");
  unorddf2_libfunc = init_one_libfunc ("__unorddf2");

  eqxf2_libfunc = init_one_libfunc ("__eqxf2");
  nexf2_libfunc = init_one_libfunc ("__nexf2");
  gtxf2_libfunc = init_one_libfunc ("__gtxf2");
  gexf2_libfunc = init_one_libfunc ("__gexf2");
  ltxf2_libfunc = init_one_libfunc ("__ltxf2");
  lexf2_libfunc = init_one_libfunc ("__lexf2");
  unordxf2_libfunc = init_one_libfunc ("__unordxf2");

  eqtf2_libfunc = init_one_libfunc ("__eqtf2");
  netf2_libfunc = init_one_libfunc ("__netf2");
  gttf2_libfunc = init_one_libfunc ("__gttf2");
  getf2_libfunc = init_one_libfunc ("__getf2");
  lttf2_libfunc = init_one_libfunc ("__lttf2");
  letf2_libfunc = init_one_libfunc ("__letf2");
  unordtf2_libfunc = init_one_libfunc ("__unordtf2");

  /* Integer-to-float conversions.  */
  floatsisf_libfunc = init_one_libfunc ("__floatsisf");
  floatdisf_libfunc = init_one_libfunc ("__floatdisf");
  floattisf_libfunc = init_one_libfunc ("__floattisf");

  floatsidf_libfunc = init_one_libfunc ("__floatsidf");
  floatdidf_libfunc = init_one_libfunc ("__floatdidf");
  floattidf_libfunc = init_one_libfunc ("__floattidf");

  floatsixf_libfunc = init_one_libfunc ("__floatsixf");
  floatdixf_libfunc = init_one_libfunc ("__floatdixf");
  floattixf_libfunc = init_one_libfunc ("__floattixf");

  floatsitf_libfunc = init_one_libfunc ("__floatsitf");
  floatditf_libfunc = init_one_libfunc ("__floatditf");
  floattitf_libfunc = init_one_libfunc ("__floattitf");

  /* Float-to-signed-integer conversions.  */
  fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
  fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
  fixsfti_libfunc = init_one_libfunc ("__fixsfti");

  fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
  fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
  fixdfti_libfunc = init_one_libfunc ("__fixdfti");

  fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
  fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
  fixxfti_libfunc = init_one_libfunc ("__fixxfti");

  fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
  fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
  fixtfti_libfunc = init_one_libfunc ("__fixtfti");

  /* Float-to-unsigned-integer conversions.  */
  fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
  fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
  fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");

  fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
  fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
  fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");

  fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
  fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
  fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");

  fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
  fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
  fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

#ifdef HAVE_conditional_trap
  init_traps ();
#endif

#ifdef INIT_TARGET_OPTABS
  /* Allow the target to add more libcalls or rename some, etc.  */
  INIT_TARGET_OPTABS;
#endif
}
/* RTX used as the condition argument of a conditional trap insn.
   GC-rooted via GTY so it survives collections between expansions.  */
static GTY(()) rtx trap_rtx;

#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are
   ignored.  */

static void
init_traps ()
{
  if (HAVE_conditional_trap)
    {
      trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
    }
}
#endif
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (code, op1, op2, tcode)
     enum rtx_code code ATTRIBUTE_UNUSED;
     rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED;
{
  enum machine_mode mode = GET_MODE (op1);

  /* A VOIDmode operand gives us no comparison mode to work with.  */
  if (mode == VOIDmode)
    return 0;

#ifdef HAVE_conditional_trap
  if (HAVE_conditional_trap
      && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx insn;

      /* Build the compare + conditional-trap pair in a fresh sequence
	 so that on failure nothing is emitted into the main stream.  */
      start_sequence ();
      emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code) (op1, op2));
      /* Patch the desired comparison code into the shared TRAP_RTX
	 placeholder before generating the trap insn (see init_traps).  */
      PUT_CODE (trap_rtx, code);
      insn = gen_conditional_trap (trap_rtx, tcode);
      if (insn)
	{
	  emit_insn (insn);
	  insn = get_insns ();
	}
      end_sequence ();

      return insn;
    }
#endif

  return 0;
}
5727 #include "gt-optabs.h"