/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for extending one integer mode to another.  */
enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];

/* Tables of patterns for converting between fixed and floating point.  */
enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
static int add_equal_note	PARAMS ((rtx, rtx, enum rtx_code, rtx, rtx));
static rtx widen_operand	PARAMS ((rtx, enum machine_mode,
					 enum machine_mode, int, int));
static int expand_cmplxdiv_straight PARAMS ((rtx, rtx, rtx, rtx,
					     rtx, rtx, enum machine_mode,
					     int, enum optab_methods,
					     enum mode_class, optab));
static int expand_cmplxdiv_wide PARAMS ((rtx, rtx, rtx, rtx,
					 rtx, rtx, enum machine_mode,
					 int, enum optab_methods,
					 enum mode_class, optab));
static void prepare_cmp_insn PARAMS ((rtx *, rtx *, enum rtx_code *, rtx,
				      enum machine_mode *, int *,
				      enum can_compare_purpose));
static enum insn_code can_fix_p	PARAMS ((enum machine_mode, enum machine_mode,
					 int, int *));
static enum insn_code can_float_p PARAMS ((enum machine_mode, enum machine_mode,
					   int));
static rtx ftruncify	PARAMS ((rtx));
static optab new_optab	PARAMS ((void));
static inline optab init_optab	PARAMS ((enum rtx_code));
static inline optab init_optabv	PARAMS ((enum rtx_code));
static void init_libfuncs PARAMS ((optab, int, int, const char *, int));
static void init_integral_libfuncs PARAMS ((optab, const char *, int));
static void init_floating_libfuncs PARAMS ((optab, const char *, int));
#ifdef HAVE_conditional_trap
static void init_traps PARAMS ((void));
#endif
static void emit_cmp_and_jump_insn_1 PARAMS ((rtx, rtx, enum machine_mode,
					      enum rtx_code, int, rtx));
static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *,
					   enum machine_mode *, int *));
static rtx expand_vector_binop PARAMS ((enum machine_mode, optab,
					rtx, rtx, rtx, int,
					enum optab_methods));
static rtx expand_vector_unop PARAMS ((enum machine_mode, optab, rtx, rtx,
				       int));
static rtx widen_clz PARAMS ((enum machine_mode, rtx, rtx));
static rtx expand_parity PARAMS ((enum machine_mode, rtx, rtx));
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

add_equal_note (insns, target, code, op0, op1)

  rtx last_insn, insn, set;

      || NEXT_INSN (insns) == NULL_RTX)

  if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
      && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')

  if (GET_CODE (target) == ZERO_EXTRACT)

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))

  set = single_set (last_insn);

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
         STRICT_LOW_PART.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
          if (reg_set_p (target, insn))
          insn = PREV_INSN (insn);

  if (GET_RTX_CLASS (code) == '1')
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

widen_operand (op, mode, oldmode, unsignedp, no_extend)
     enum machine_mode mode, oldmode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
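  /* Example: widening a QImode AND to an SImode AND with NO_EXTEND nonzero
     may leave the upper 24 bits of the paradoxical SUBREG undefined; that is
     fine because the caller truncates the result back to QImode and the low
     8 bits of an AND do not depend on the high-order input bits.  */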
/* Generate code to perform a straightforward complex divide.  */

expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode,
                          unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     enum optab_methods methods;
     enum mode_class class;

  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
    imag0 = force_reg (submode, imag0);
  imag1 = force_reg (submode, imag1);

  /* Divisor: c*c + d*d.  */
  temp1 = expand_binop (submode, this_mul_optab, real1, real1,
                        NULL_RTX, unsignedp, methods);

  temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
                        NULL_RTX, unsignedp, methods);

  if (temp1 == 0 || temp2 == 0)

  divisor = expand_binop (submode, this_add_optab, temp1, temp2,
                          NULL_RTX, unsignedp, methods);

      /* Mathematically, ((a)(c-id))/divisor.  */
      /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)).  */
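      /* Derivation: multiply numerator and denominator by the conjugate
         (c - id); for a purely real dividend a this gives
         a*(c - id) / ((c + id)*(c - id)) = (ac - i*ad) / (c*c + d*d),
         i.e. the real part ac/(cc+dd) and the imaginary part -ad/(cc+dd)
         computed below.  */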
      /* Calculate the dividend.  */
      real_t = expand_binop (submode, this_mul_optab, real0, real1,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
                            NULL_RTX, unsignedp);

      /* Mathematically, ((a+ib)(c-id))/divider.  */
      /* Calculate the dividend.  */
      temp1 = expand_binop (submode, this_mul_optab, real0, real1,
                            NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      real_t = expand_binop (submode, this_add_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
                            NULL_RTX, unsignedp, methods);

      temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
                            NULL_RTX, unsignedp, methods);

      if (temp1 == 0 || temp2 == 0)

      imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

    emit_move_insn (imagr, res);
/* Generate code to perform a wide-input-range-acceptable complex divide.  */

expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode,
                      unsignedp, methods, class, binoptab)
     rtx real0, real1, imag0, imag1, realr, imagr;
     enum machine_mode submode;
     enum optab_methods methods;
     enum mode_class class;

  rtx temp1, temp2, lab1, lab2;
  enum machine_mode mode;
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
    imag0 = force_reg (submode, imag0);
  imag1 = force_reg (submode, imag1);

  /* XXX What's an "unsigned" complex number?  */
      temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
      temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);

  if (temp1 == 0 || temp2 == 0)

  mode = GET_MODE (temp1);
  lab1 = gen_label_rtx ();
  emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
                           mode, unsignedp, lab1);

  /* |c| >= |d|; use ratio d/c to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, imag1, real1,
                          NULL_RTX, unsignedp, methods);
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                           imag1, real1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
                        NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, real1,
                          NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)).  */

      imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
                            NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
         (a+b(d/c))/(c+d(d/c)) + i(b-a(d/c))/(c+d(d/c)).  */
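      /* Scaling by the ratio d/c, whose magnitude is at most 1 on this
         branch because |c| >= |d|, keeps the intermediate products near the
         magnitude of the operands, so the divisor c + d*(d/c) does not
         overflow the way c*c + d*d can in the straightforward method.  This
         is essentially Smith's algorithm for complex division.  */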
      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                            NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, real0,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                            NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

    emit_move_insn (imagr, res);

  lab2 = gen_label_rtx ();
  emit_jump_insn (gen_jump (lab2));

  /* |d| > |c|; use ratio c/d to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, real1, imag1,
                          NULL_RTX, unsignedp, methods);
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                           real1, imag1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
                        NULL_RTX, unsignedp, methods);

  divisor = expand_binop (submode, this_add_optab, temp1, imag1,
                          NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d).  */

      real_t = expand_binop (submode, this_mul_optab, real0, ratio,
                             NULL_RTX, unsignedp, methods);

      imag_t = expand_unop (submode, this_neg_optab, real0,
                            NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
         (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d).  */

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
                            NULL_RTX, unsignedp, methods);

      real_t = expand_binop (submode, this_add_optab, temp1, imag0,
                             NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
                            NULL_RTX, unsignedp, methods);

      imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
                             NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
                        realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         real_t, divisor, realr, unsignedp);

    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
                        imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                         imag_t, divisor, imagr, unsignedp);

    emit_move_insn (imagr, res);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

expand_simple_binop (mode, code, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum optab_methods methods;

  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
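/* For example, expand_simple_binop (SImode, PLUS, x, y, target, 0,
   OPTAB_LIB_WIDEN) looks up code_to_optab[PLUS] (i.e. add_optab) and is
   equivalent to calling expand_binop with that optab directly.  */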
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum optab_methods methods;

  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);
    target = protect_from_queue (target, 1);

      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
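  /* E.g. "x - 5" is handled from here on as "x + (-5)", so only add_optab
     needs to be considered and the negated constant can be folded into an
     add-immediate where the target supports one.  */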
  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == 'c'
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)

      if (((target == 0 || GET_CODE (target) == REG)
           ? ((GET_CODE (op1) == REG
               && GET_CODE (op0) != REG)
              || target == op1)
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudos.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);

          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,

        delete_insns_since (last);
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);
            return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
            rtx xop0 = op0, xop1 = op1;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);

                if (class != MODE_INT)
                    target = gen_reg_rtx (mode);
                  convert_move (target, temp, 0);
                  return gen_lowpart (mode, temp);
              delete_insns_since (last);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode,
                                          copy_rtx (op0), copy_rtx (op1));

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      rtx insns, inter, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = binoptab == ashl_optab;
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count >= BITS_PER_WORD)
          inter = expand_binop (word_mode, binoptab,
                                outof_input,
                                GEN_INT (shift_count - BITS_PER_WORD),
                                into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          /* For a signed right shift, we must fill the word we are shifting
             out of with copies of the sign bit.  Otherwise it is zeroed.  */
          if (inter != 0 && binoptab != ashr_optab)
            inter = CONST0_RTX (word_mode);
          else if (inter != 0)
            inter = expand_binop (word_mode, binoptab,
                                  outof_input,
                                  GEN_INT (BITS_PER_WORD - 1),
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
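          /* Example: with BITS_PER_WORD == 32, a left shift of a DImode
             value by 40 stores (low input word << 8) into the high result
             word and zeros the low result word; only an arithmetic right
             shift needs the vacated word filled with copies of the sign
             bit instead of zeros.  */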
          optab reverse_unsigned_shift, unsigned_shift;

          /* For a shift of less than BITS_PER_WORD, to compute the carry,
             we must do a logical shift in the opposite direction of the
             desired shift.  */

          reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);

          /* For a shift of less than BITS_PER_WORD, to compute the word
             shifted towards, we need to unsigned shift the orig value of
             that word.  */

          unsigned_shift = (left_shift ? ashl_optab : lshr_optab);

          carries = expand_binop (word_mode, reverse_unsigned_shift,
                                  outof_input,
                                  GEN_INT (BITS_PER_WORD - shift_count),
                                  0, unsignedp, next_methods);

            inter = expand_binop (word_mode, unsigned_shift, into_input,
                                  op1, 0, unsignedp, next_methods);

            inter = expand_binop (word_mode, ior_optab, carries, inter,
                                  into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

            inter = expand_binop (word_mode, binoptab, outof_input,
                                  op1, outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);

          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

          /* We can't make this a no conflict block if this is a word swap,
             because the word swap case fails if the input and output values
             are in the same register.  */
          if (shift_count != BITS_PER_WORD)
            emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || GET_CODE (target) != REG)

      /* Indicate for flow that the entire target reg is being set.  */
      if (GET_CODE (target) == REG)
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);

              emit_move_insn (target_piece, newx);

          carry_in = carry_out;

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);
  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
                               _______________________
                              [__op0_high_|__op0_low__]
                               _______________________
        *                     [__op1_high_|__op1_low__]
        _______________________________________________
                               _______________________
    (1)                       [__op0_low__*__op1_low__]
                         _______________________
    (2a)                [__op0_low__*__op1_high_]
                         _______________________
    (2b)                [__op0_high_*__op1_low__]
                   _______________________
    (3)           [__op0_high_*__op1_high_]

    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
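  /* In other words, writing op0 = op0_high * W + op0_low and
     op1 = op1_high * W + op1_low with W = 2**BITS_PER_WORD, the low two
     result words are

        op0_low * op1_low + W * (op0_low * op1_high + op0_high * op1_low)

     taken modulo W*W, which is exactly partial product (1) with the low
     words of (2a) and (2b) added into its high word.  */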
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))
      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);
      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && GET_CODE (target) != REG))

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */
            delete_insns_since (last);
            op0_xhigh = op0_high, op1_xhigh = op1_high;

          && smul_widen_optab->handlers[(int) mode].insn_code
               != CODE_FOR_nothing)
          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);

              op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                          op0_xhigh, op0_xhigh, 0,
                                          next_methods);

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);

              op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                          op1_xhigh, op1_xhigh, 0,
                                          next_methods);

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)
          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

          if (!REG_P (product_high))
            product_high = force_reg (word_mode, product_high);

            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,
                                 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          emit_move_insn (operand_subword (product, high, 1, mode), product_high);

              if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
                  temp = emit_move_insn (product, product);
                  set_unique_reg_note (temp,
                                       REG_EQUAL,
                                       gen_rtx_fmt_ee (MULT, mode,

      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         the meantime.  */

      delete_insns_since (last);
  /* Open-code the vector operations if we have no hardware support
     for them.  */
  if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
    return expand_vector_binop (mode, binoptab, op0, op1, target,
                                unsignedp, methods);

  /* We need to open-code the complex type operations: '+, -, * and /' */

  /* At this point we allow operations between two similar complex
     numbers, and also if one of the operands is not a complex number
     but rather of MODE_FLOAT or MODE_INT.  However, the caller
     must make sure that the MODE of the non-complex operand matches
     the SUBMODE of the complex operand.  */
  if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
      rtx real0 = 0, imag0 = 0;
      rtx real1 = 0, imag1 = 0;
      rtx realr, imagr, res;

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode = GET_MODE_INNER (mode);

      if (submode == BLKmode)

        target = gen_reg_rtx (mode);

      realr = gen_realpart (submode, target);
      imagr = gen_imagpart (submode, target);

      if (GET_MODE (op0) == mode)
          real0 = gen_realpart (submode, op0);
          imag0 = gen_imagpart (submode, op0);

      if (GET_MODE (op1) == mode)
          real1 = gen_realpart (submode, op1);
          imag1 = gen_imagpart (submode, op1);

      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))

      switch (binoptab->code)
          /* (a+ib) + (c+id) = (a+c) + i(b+d) */
          /* (a+ib) - (c+id) = (a-c) + i(b-d) */
          res = expand_binop (submode, binoptab, real0, real1,
                              realr, unsignedp, methods);

          else if (res != realr)
            emit_move_insn (realr, res);

          if (imag0 != 0 && imag1 != 0)
            res = expand_binop (submode, binoptab, imag0, imag1,
                                imagr, unsignedp, methods);
          else if (imag0 != 0)
          else if (binoptab->code == MINUS)
            res = expand_unop (submode,
                               binoptab == subv_optab ? negv_optab : neg_optab,
                               imag1, imagr, unsignedp);

          else if (res != imagr)
            emit_move_insn (imagr, res);

          /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */

          if (imag0 != 0 && imag1 != 0)
                /* Don't fetch these from memory more than once.  */
                real0 = force_reg (submode, real0);
                real1 = force_reg (submode, real1);
                imag0 = force_reg (submode, imag0);
                imag1 = force_reg (submode, imag1);

                temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
                                      unsignedp, methods);

                temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
                                      unsignedp, methods);

                if (temp1 == 0 || temp2 == 0)

                res = (expand_binop (submode,
                                     binoptab == smulv_optab ? subv_optab : sub_optab,
                                     temp1, temp2, realr, unsignedp, methods));

                else if (res != realr)
                  emit_move_insn (realr, res);

                temp1 = expand_binop (submode, binoptab, real0, imag1,
                                      NULL_RTX, unsignedp, methods);

                temp2 = expand_binop (submode, binoptab, real1, imag0,
                                      NULL_RTX, unsignedp, methods);

                if (temp1 == 0 || temp2 == 0)

                res = (expand_binop (submode,
                                     binoptab == smulv_optab ? addv_optab : add_optab,
                                     temp1, temp2, imagr, unsignedp, methods));

                else if (res != imagr)
                  emit_move_insn (imagr, res);

                /* Don't fetch these from memory more than once.  */
                real0 = force_reg (submode, real0);
                real1 = force_reg (submode, real1);

                res = expand_binop (submode, binoptab, real0, real1,
                                    realr, unsignedp, methods);
                else if (res != realr)
                  emit_move_insn (realr, res);

                  res = expand_binop (submode, binoptab,
                                      real1, imag0, imagr, unsignedp, methods);
                  res = expand_binop (submode, binoptab,
                                      real0, imag1, imagr, unsignedp, methods);

                else if (res != imagr)
                  emit_move_insn (imagr, res);

          /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
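          /* The easy case of a divisor with zero imaginary part (c + i0) is
             handled inline just below; the general case is delegated further
             down to expand_cmplxdiv_straight or expand_cmplxdiv_wide,
             selected by flag_complex_divide_method.  */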
              /* (a+ib) / (c+i0) = (a/c) + i(b/c) */

              /* Don't fetch these from memory more than once.  */
              real1 = force_reg (submode, real1);

              /* Simply divide the real and imaginary parts by `c' */
              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, real0, real1,
                                    realr, unsignedp, methods);
                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     real0, real1, realr, unsignedp);

              else if (res != realr)
                emit_move_insn (realr, res);

              if (class == MODE_COMPLEX_FLOAT)
                res = expand_binop (submode, binoptab, imag0, real1,
                                    imagr, unsignedp, methods);
                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     imag0, real1, imagr, unsignedp);

              else if (res != imagr)
                emit_move_insn (imagr, res);

              switch (flag_complex_divide_method)
                  ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
                                                 realr, imagr, submode,

                  ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
                                             realr, imagr, submode,

          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));

          emit_no_conflict_block (seq, target, op0, op1, equiv_value);

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
      enum machine_mode op1_mode = mode;

          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
              rtx xop0 = op0, xop1 = op1;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);

                  if (class != MODE_INT)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return gen_lowpart (mode, temp);
                  delete_insns_since (last);

  delete_insns_since (entry_last);
/* Like expand_binop, but for open-coding vector binops.  */

expand_vector_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     enum optab_methods methods;

  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;
  rtx t, a, b, res, seq;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
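  /* For example, a V8HImode addition on a target that only provides a
     V4HImode add pattern can be carried out below as two V4HImode
     additions over the halves of the operands.  */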
  for (tmode = GET_CLASS_NARROWEST_MODE (class);
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))
      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
          && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

  switch (binoptab->code)
      tmode = int_mode_for_mode (mode);
      if (tmode != BLKmode)

      subsize = GET_MODE_SIZE (submode);
      subbitsize = GET_MODE_BITSIZE (submode);
      elts = size / subsize;

      /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
         but that we operate on more than one element at a time.  */
      if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)

      /* Errors can leave us with a const0_rtx as operand.  */
      if (GET_MODE (op0) != mode)
        op0 = copy_to_mode_reg (mode, op0);
      if (GET_MODE (op1) != mode)
        op1 = copy_to_mode_reg (mode, op1);

        target = gen_reg_rtx (mode);

      for (i = 0; i < elts; ++i)
          /* If this is part of a register, and not the first item in the
             word, we can't store using a SUBREG - that would clobber
             And storing with a SUBREG is only possible for the least
             significant part, hence we can't do it for big endian
             (unless we want to permute the evaluation order.  */
          if (GET_CODE (target) == REG
              && (BYTES_BIG_ENDIAN
                  ? subsize < UNITS_PER_WORD
                  : ((i * subsize) % UNITS_PER_WORD) != 0))

            t = simplify_gen_subreg (submode, target, mode, i * subsize);
          if (CONSTANT_P (op0))
            a = simplify_gen_subreg (submode, op0, mode, i * subsize);
            a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
                                   NULL_RTX, submode, submode, size);
          if (CONSTANT_P (op1))
            b = simplify_gen_subreg (submode, op1, mode, i * subsize);
            b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
                                   NULL_RTX, submode, submode, size);

          if (binoptab->code == DIV)
              if (class == MODE_VECTOR_FLOAT)
                res = expand_binop (submode, binoptab, a, b, t,
                                    unsignedp, methods);
                res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
                                     a, b, t, unsignedp);
            res = expand_binop (submode, binoptab, a, b, t,
                                unsignedp, methods);

            emit_move_insn (t, res);
            store_bit_field (target, subbitsize, i * subbitsize, submode, res,
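  /* E.g. an addition on a V4SImode value with no vector add pattern at all
     falls back to four SImode additions here: each element is extracted,
     added in SImode, and stored back into the result vector.  */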
/* Like expand_unop but for open-coding vector unops.  */

expand_vector_unop (mode, unoptab, op0, target, unsignedp)
     enum machine_mode mode;

  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
  for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))
      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
          && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

  /* If there is no negate operation, try doing a subtract from zero.  */
  if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
      /* Avoid infinite recursion when an
         error has left us with the wrong mode.  */
      && GET_MODE (op0) == mode)
      temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_DIRECT);

  if (unoptab == one_cmpl_optab)
      tmode = int_mode_for_mode (mode);
      if (tmode != BLKmode)

  subsize = GET_MODE_SIZE (submode);
  subbitsize = GET_MODE_BITSIZE (submode);
  elts = size / subsize;

  /* Errors can leave us with a const0_rtx as operand.  */
  if (GET_MODE (op0) != mode)
    op0 = copy_to_mode_reg (mode, op0);

    target = gen_reg_rtx (mode);

  for (i = 0; i < elts; ++i)
      /* If this is part of a register, and not the first item in the
         word, we can't store using a SUBREG - that would clobber
         And storing with a SUBREG is only possible for the least
         significant part, hence we can't do it for big endian
         (unless we want to permute the evaluation order.  */
      if (GET_CODE (target) == REG
          && (BYTES_BIG_ENDIAN
              ? subsize < UNITS_PER_WORD
              : ((i * subsize) % UNITS_PER_WORD) != 0))

        t = simplify_gen_subreg (submode, target, mode, i * subsize);
      if (CONSTANT_P (op0))
        a = simplify_gen_subreg (submode, op0, mode, i * subsize);
        a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
                               t, submode, submode, size);

      res = expand_unop (submode, unoptab, a, t, unsignedp);

        emit_move_insn (t, res);
        store_bit_field (target, subbitsize, i * subbitsize, submode, res,
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
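/* E.g. a QImode unsigned division widened to HImode: both zero-extended
   operands are non-negative HImode values, so a signed HImode division
   yields the same quotient as an unsigned one would.  */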
sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods)
     enum machine_mode mode;
     optab uoptab, soptab;
     rtx op0, op1, target;
     enum optab_methods methods;

  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
2171 /* Generate code to perform an operation specified by BINOPTAB
2172 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2173 We assume that the order of the operands for the instruction
2174 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2175 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2177 Either TARG0 or TARG1 may be zero, but what that means is that
2178 the result is not actually wanted. We will generate it into
2179 a dummy pseudo-reg and discard it. They may not both be zero.
2181 Returns 1 if this operation can be performed; 0 if not. */
2184 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
2190 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2191 enum mode_class
class;
2192 enum machine_mode wider_mode
;
2193 rtx entry_last
= get_last_insn ();
2196 class = GET_MODE_CLASS (mode
);
2198 op0
= protect_from_queue (op0
, 0);
2199 op1
= protect_from_queue (op1
, 0);
2203 op0
= force_not_mem (op0
);
2204 op1
= force_not_mem (op1
);
2207 /* If we are inside an appropriately-short loop and one operand is an
2208 expensive constant, force it into a register. */
2209 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
2210 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
2211 op0
= force_reg (mode
, op0
);
2213 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
2214 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
2215 op1
= force_reg (mode
, op1
);
2218 targ0
= protect_from_queue (targ0
, 1);
2220 targ0
= gen_reg_rtx (mode
);
2222 targ1
= protect_from_queue (targ1
, 1);
2224 targ1
= gen_reg_rtx (mode
);
2226 /* Record where to go back to if we fail. */
2227 last
= get_last_insn ();
2229 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2231 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
2232 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2233 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2235 rtx xop0
= op0
, xop1
= op1
;
2237 /* In case the insn wants input operands in modes different from
2238 those of the actual operands, convert the operands. It would
2239 seem that we don't need to convert CONST_INTs, but we do, so
2240 that they're properly zero-extended, sign-extended or truncated
2243 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
2244 xop0
= convert_modes (mode0
,
2245 GET_MODE (op0
) != VOIDmode
2250 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
2251 xop1
= convert_modes (mode1
,
2252 GET_MODE (op1
) != VOIDmode
2257 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2258 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2259 xop0
= copy_to_mode_reg (mode0
, xop0
);
2261 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
2262 xop1
= copy_to_mode_reg (mode1
, xop1
);
2264 /* We could handle this, but we should always be called with a pseudo
2265 for our targets and all insns should take them as outputs. */
2266 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
2267 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
2270 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
2277 delete_insns_since (last
);
2280 /* It can't be done in this mode. Can we do it in a wider mode? */
2282 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2284 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2285 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2287 if (binoptab
->handlers
[(int) wider_mode
].insn_code
2288 != CODE_FOR_nothing
)
2290 rtx t0
= gen_reg_rtx (wider_mode
);
2291 rtx t1
= gen_reg_rtx (wider_mode
);
2292 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2293 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2295 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2298 convert_move (targ0
, t0
, unsignedp
);
2299 convert_move (targ1
, t1
, unsignedp
);
2303 delete_insns_since (last
);
2308 delete_insns_since (entry_last
);
2312 /* Wrapper around expand_unop which takes an rtx code to specify
2313 the operation to perform, not an optab pointer. All other
2314 arguments are the same. */
2316 expand_simple_unop (mode
, code
, op0
, target
, unsignedp
)
2317 enum machine_mode mode
;
2323 optab unop
= code_to_optab
[(int) code
];
2327 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2333 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2335 widen_clz (mode
, op0
, target
)
2336 enum machine_mode mode
;
2340 enum mode_class
class = GET_MODE_CLASS (mode
);
2341 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2343 enum machine_mode wider_mode
;
2344 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2345 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2347 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2348 != CODE_FOR_nothing
)
2350 rtx xop0
, temp
, last
;
2352 last
= get_last_insn ();
2355 target
= gen_reg_rtx (mode
);
2356 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2357 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2359 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2360 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2361 - GET_MODE_BITSIZE (mode
)),
2362 target
, true, OPTAB_DIRECT
);
2364 delete_insns_since (last
);
2373 /* Try calculating (parity x) as (and (popcount x) 1), where
2374 popcount can also be done in a wider mode. */
2376 expand_parity (mode
, op0
, target
)
2377 enum machine_mode mode
;
2381 enum mode_class
class = GET_MODE_CLASS (mode
);
2382 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2384 enum machine_mode wider_mode
;
2385 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2386 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2388 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2389 != CODE_FOR_nothing
)
2391 rtx xop0
, temp
, last
;
2393 last
= get_last_insn ();
2396 target
= gen_reg_rtx (mode
);
2397 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2398 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2401 temp
= expand_binop (wider_mode
, and_optab
, temp
, GEN_INT (1),
2402 target
, true, OPTAB_DIRECT
);
2404 delete_insns_since (last
);
2413 /* Generate code to perform an operation specified by UNOPTAB
2414 on operand OP0, with result having machine-mode MODE.
2416 UNSIGNEDP is for the case where we have to widen the operands
2417 to perform the operation. It says to use zero-extension.
2419 If TARGET is nonzero, the value
2420 is generated there, if it is convenient to do so.
2421 In all cases an rtx is returned for the locus of the value;
2422 this may or may not be TARGET. */
2425 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
2426 enum machine_mode mode
;
2432 enum mode_class
class;
2433 enum machine_mode wider_mode
;
2435 rtx last
= get_last_insn ();
2438 class = GET_MODE_CLASS (mode
);
2440 op0
= protect_from_queue (op0
, 0);
2444 op0
= force_not_mem (op0
);
2448 target
= protect_from_queue (target
, 1);
2450 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2452 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2453 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2459 temp
= gen_reg_rtx (mode
);
2461 if (GET_MODE (xop0
) != VOIDmode
2462 && GET_MODE (xop0
) != mode0
)
2463 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2465 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2467 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2468 xop0
= copy_to_mode_reg (mode0
, xop0
);
2470 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2471 temp
= gen_reg_rtx (mode
);
2473 pat
= GEN_FCN (icode
) (temp
, xop0
);
2476 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2477 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2479 delete_insns_since (last
);
2480 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2488 delete_insns_since (last
);
2491 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2493 /* Widening clz needs special treatment. */
2494 if (unoptab
== clz_optab
)
2496 temp
= widen_clz (mode
, op0
, target
);
2503 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2504 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2505 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2507 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2511 /* For certain operations, we need not actually extend
2512 the narrow operand, as long as we will truncate the
2513 results to the same narrowness. */
2515 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2516 (unoptab
== neg_optab
2517 || unoptab
== one_cmpl_optab
)
2518 && class == MODE_INT
);
2520 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2525 if (class != MODE_INT
)
2528 target
= gen_reg_rtx (mode
);
2529 convert_move (target
, temp
, 0);
2533 return gen_lowpart (mode
, temp
);
2536 delete_insns_since (last
);
2540 /* These can be done a word at a time. */
2541 if (unoptab
== one_cmpl_optab
2542 && class == MODE_INT
2543 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2544 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2549 if (target
== 0 || target
== op0
)
2550 target
= gen_reg_rtx (mode
);
2554 /* Do the actual arithmetic. */
2555 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2557 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2558 rtx x
= expand_unop (word_mode
, unoptab
,
2559 operand_subword_force (op0
, i
, mode
),
2560 target_piece
, unsignedp
);
2562 if (target_piece
!= x
)
2563 emit_move_insn (target_piece
, x
);
2566 insns
= get_insns ();
2569 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2570 gen_rtx_fmt_e (unoptab
->code
, mode
,
2575 /* Open-code the complex negation operation. */
2576 else if (unoptab
->code
== NEG
2577 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
2583 /* Find the correct mode for the real and imaginary parts */
2584 enum machine_mode submode
= GET_MODE_INNER (mode
);
2586 if (submode
== BLKmode
)
2590 target
= gen_reg_rtx (mode
);
2594 target_piece
= gen_imagpart (submode
, target
);
2595 x
= expand_unop (submode
, unoptab
,
2596 gen_imagpart (submode
, op0
),
2597 target_piece
, unsignedp
);
2598 if (target_piece
!= x
)
2599 emit_move_insn (target_piece
, x
);
2601 target_piece
= gen_realpart (submode
, target
);
2602 x
= expand_unop (submode
, unoptab
,
2603 gen_realpart (submode
, op0
),
2604 target_piece
, unsignedp
);
2605 if (target_piece
!= x
)
2606 emit_move_insn (target_piece
, x
);
2611 emit_no_conflict_block (seq
, target
, op0
, 0,
2612 gen_rtx_fmt_e (unoptab
->code
, mode
,
2617 /* Try negating floating point values by flipping the sign bit. */
2618 if (unoptab
->code
== NEG
&& class == MODE_FLOAT
2619 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2621 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2622 enum machine_mode imode
= int_mode_for_mode (mode
);
2623 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2625 if (imode
!= BLKmode
&& bitpos
>= 0 && fmt
->has_signed_zero
)
2627 HOST_WIDE_INT hi
, lo
;
2628 rtx last
= get_last_insn ();
2630 /* Handle targets with different FP word orders. */
2631 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2633 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2634 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2635 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2638 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2641 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2645 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2648 temp
= expand_binop (imode
, xor_optab
,
2649 gen_lowpart (imode
, op0
),
2650 immed_double_const (lo
, hi
, imode
),
2651 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2653 return gen_lowpart (mode
, temp
);
2654 delete_insns_since (last
);
2658 /* Try calculating parity (x) as popcount (x) % 2. */
2659 if (unoptab
== parity_optab
)
2661 temp
= expand_parity (mode
, op0
, target
);
2667 /* Now try a library call in this mode. */
2668 if (unoptab
->handlers
[(int) mode
].libfunc
)
2672 enum machine_mode outmode
= mode
;
2674 /* All of these functions return small values. Thus we choose to
2675 have them return something that isn't a double-word. */
2676 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2677 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2678 outmode
= TYPE_MODE (integer_type_node
);
2682 /* Pass 1 for NO_QUEUE so we don't lose any increments
2683 if the libcall is cse'd or moved. */
2684 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2685 NULL_RTX
, LCT_CONST
, outmode
,
2687 insns
= get_insns ();
2690 target
= gen_reg_rtx (outmode
);
2691 emit_libcall_block (insns
, target
, value
,
2692 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2697 if (class == MODE_VECTOR_FLOAT
|| class == MODE_VECTOR_INT
)
2698 return expand_vector_unop (mode
, unoptab
, op0
, target
, unsignedp
);
2700 /* It can't be done in this mode. Can we do it in a wider mode? */
2702 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2704 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2705 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2707 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2708 != CODE_FOR_nothing
)
2709 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2713 /* For certain operations, we need not actually extend
2714 the narrow operand, as long as we will truncate the
2715 results to the same narrowness. */
2717 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2718 (unoptab
== neg_optab
2719 || unoptab
== one_cmpl_optab
)
2720 && class == MODE_INT
);
2722 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2725 /* If we are generating clz using wider mode, adjust the
2727 if (unoptab
== clz_optab
&& temp
!= 0)
2728 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2729 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2730 - GET_MODE_BITSIZE (mode
)),
2731 target
, true, OPTAB_DIRECT
);
2735 if (class != MODE_INT
)
2738 target
= gen_reg_rtx (mode
);
2739 convert_move (target
, temp
, 0);
2743 return gen_lowpart (mode
, temp
);
2746 delete_insns_since (last
);
2751 /* If there is no negate operation, try doing a subtract from zero.
2752 The US Software GOFAST library needs this. */
2753 if (unoptab
->code
== NEG
)
2756 temp
= expand_binop (mode
,
2757 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2758 CONST0_RTX (mode
), op0
,
2759 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2767 /* Emit code to compute the absolute value of OP0, with result to
2768 TARGET if convenient. (TARGET may be 0.) The return value says
2769 where the result actually is to be found.
2771 MODE is the mode of the operand; the mode of the result is
2772 different but can be deduced from MODE.
2777 expand_abs (mode
, op0
, target
, result_unsignedp
, safe
)
2778 enum machine_mode mode
;
2781 int result_unsignedp
;
2787 result_unsignedp
= 1;
2789 /* First try to do it with a special abs instruction. */
2790 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2795 /* For floating point modes, try clearing the sign bit. */
2796 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
2797 && GET_MODE_BITSIZE (mode
) <= 2 * HOST_BITS_PER_WIDE_INT
)
2799 const struct real_format
*fmt
= real_format_for_mode
[mode
- QFmode
];
2800 enum machine_mode imode
= int_mode_for_mode (mode
);
2801 int bitpos
= (fmt
!= 0) ? fmt
->signbit
: -1;
2803 if (imode
!= BLKmode
&& bitpos
>= 0)
2805 HOST_WIDE_INT hi
, lo
;
2806 rtx last
= get_last_insn ();
2808 /* Handle targets with different FP word orders. */
2809 if (FLOAT_WORDS_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
2811 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
2812 int word
= nwords
- (bitpos
/ BITS_PER_WORD
) - 1;
2813 bitpos
= word
* BITS_PER_WORD
+ bitpos
% BITS_PER_WORD
;
2816 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2819 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2823 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2826 temp
= expand_binop (imode
, and_optab
,
2827 gen_lowpart (imode
, op0
),
2828 immed_double_const (~lo
, ~hi
, imode
),
2829 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2831 return gen_lowpart (mode
, temp
);
2832 delete_insns_since (last
);
2836 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2837 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2839 rtx last
= get_last_insn ();
2841 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2843 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2849 delete_insns_since (last
);
2852 /* If this machine has expensive jumps, we can do integer absolute
2853 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2854 where W is the width of MODE. */
2856 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2858 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2859 size_int (GET_MODE_BITSIZE (mode
) - 1),
2862 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2865 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2866 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2872 /* If that does not win, use conditional jump and negate. */
2874 /* It is safe to use the target if it is the same
2875 as the source if this is also a pseudo register */
2876 if (op0
== target
&& GET_CODE (op0
) == REG
2877 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2880 op1
= gen_label_rtx ();
2881 if (target
== 0 || ! safe
2882 || GET_MODE (target
) != mode
2883 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2884 || (GET_CODE (target
) == REG
2885 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2886 target
= gen_reg_rtx (mode
);
2888 emit_move_insn (target
, op0
);
2891 /* If this mode is an integer too wide to compare properly,
2892 compare word by word. Rely on CSE to optimize constant cases. */
2893 if (GET_MODE_CLASS (mode
) == MODE_INT
2894 && ! can_compare_p (GE
, mode
, ccp_jump
))
2895 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2898 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2899 NULL_RTX
, NULL_RTX
, op1
);
2901 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2904 emit_move_insn (target
, op0
);
2910 /* Emit code to compute the absolute value of OP0, with result to
2911 TARGET if convenient. (TARGET may be 0.) The return value says
2912 where the result actually is to be found.
2914 MODE is the mode of the operand; the mode of the result is
2915 different but can be deduced from MODE.
2917 UNSIGNEDP is relevant for complex integer modes. */
2920 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2921 enum machine_mode mode
;
2926 enum mode_class
class = GET_MODE_CLASS (mode
);
2927 enum machine_mode wider_mode
;
2929 rtx entry_last
= get_last_insn ();
2932 optab this_abs_optab
;
2934 /* Find the correct mode for the real and imaginary parts. */
2935 enum machine_mode submode
= GET_MODE_INNER (mode
);
2937 if (submode
== BLKmode
)
2940 op0
= protect_from_queue (op0
, 0);
2944 op0
= force_not_mem (op0
);
2947 last
= get_last_insn ();
2950 target
= protect_from_queue (target
, 1);
2952 this_abs_optab
= ! unsignedp
&& flag_trapv
2953 && (GET_MODE_CLASS(mode
) == MODE_INT
)
2954 ? absv_optab
: abs_optab
;
2956 if (this_abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2958 int icode
= (int) this_abs_optab
->handlers
[(int) mode
].insn_code
;
2959 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2965 temp
= gen_reg_rtx (submode
);
2967 if (GET_MODE (xop0
) != VOIDmode
2968 && GET_MODE (xop0
) != mode0
)
2969 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2971 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2973 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2974 xop0
= copy_to_mode_reg (mode0
, xop0
);
2976 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, submode
))
2977 temp
= gen_reg_rtx (submode
);
2979 pat
= GEN_FCN (icode
) (temp
, xop0
);
2982 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2983 && ! add_equal_note (pat
, temp
, this_abs_optab
->code
, xop0
,
2986 delete_insns_since (last
);
2987 return expand_unop (mode
, this_abs_optab
, op0
, NULL_RTX
,
2996 delete_insns_since (last
);
2999 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3001 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3002 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3004 if (this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3005 != CODE_FOR_nothing
)
3009 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3010 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3014 if (class != MODE_COMPLEX_INT
)
3017 target
= gen_reg_rtx (submode
);
3018 convert_move (target
, temp
, 0);
3022 return gen_lowpart (submode
, temp
);
3025 delete_insns_since (last
);
3029 /* Open-code the complex absolute-value operation
3030 if we can open-code sqrt. Otherwise it's not worth while. */
3031 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
3034 rtx real
, imag
, total
;
3036 real
= gen_realpart (submode
, op0
);
3037 imag
= gen_imagpart (submode
, op0
);
3039 /* Square both parts. */
3040 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
3041 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
3043 /* Sum the parts. */
3044 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
3045 0, OPTAB_LIB_WIDEN
);
3047 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3048 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
3050 delete_insns_since (last
);
3055 /* Now try a library call in this mode. */
3056 if (this_abs_optab
->handlers
[(int) mode
].libfunc
)
3063 /* Pass 1 for NO_QUEUE so we don't lose any increments
3064 if the libcall is cse'd or moved. */
3065 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
3066 NULL_RTX
, LCT_CONST
, submode
, 1, op0
, mode
);
3067 insns
= get_insns ();
3070 target
= gen_reg_rtx (submode
);
3071 emit_libcall_block (insns
, target
, value
,
3072 gen_rtx_fmt_e (this_abs_optab
->code
, mode
, op0
));
3077 /* It can't be done in this mode. Can we do it in a wider mode? */
3079 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3080 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3082 if ((this_abs_optab
->handlers
[(int) wider_mode
].insn_code
3083 != CODE_FOR_nothing
)
3084 || this_abs_optab
->handlers
[(int) wider_mode
].libfunc
)
3088 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
3090 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
3094 if (class != MODE_COMPLEX_INT
)
3097 target
= gen_reg_rtx (submode
);
3098 convert_move (target
, temp
, 0);
3102 return gen_lowpart (submode
, temp
);
3105 delete_insns_since (last
);
3109 delete_insns_since (entry_last
);
3113 /* Generate an instruction whose insn-code is INSN_CODE,
3114 with two operands: an output TARGET and an input OP0.
3115 TARGET *must* be nonzero, and the output is always stored there.
3116 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3117 the value that is stored into TARGET. */
3120 emit_unop_insn (icode
, target
, op0
, code
)
3127 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
3130 temp
= target
= protect_from_queue (target
, 1);
3132 op0
= protect_from_queue (op0
, 0);
3134 /* Sign and zero extension from memory is often done specially on
3135 RISC machines, so forcing into a register here can pessimize
3137 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
3138 op0
= force_not_mem (op0
);
3140 /* Now, if insn does not accept our operands, put them into pseudos. */
3142 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
3143 op0
= copy_to_mode_reg (mode0
, op0
);
3145 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
3146 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
3147 temp
= gen_reg_rtx (GET_MODE (temp
));
3149 pat
= GEN_FCN (icode
) (temp
, op0
);
3151 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
3152 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
3157 emit_move_insn (target
, temp
);
3160 /* Emit code to perform a series of operations on a multi-word quantity, one
3163 Such a block is preceded by a CLOBBER of the output, consists of multiple
3164 insns, each setting one word of the output, and followed by a SET copying
3165 the output to itself.
3167 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3168 note indicating that it doesn't conflict with the (also multi-word)
3169 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3172 INSNS is a block of code generated to perform the operation, not including
3173 the CLOBBER and final copy. All insns that compute intermediate values
3174 are first emitted, followed by the block as described above.
3176 TARGET, OP0, and OP1 are the output and inputs of the operations,
3177 respectively. OP1 may be zero for a unary operation.
3179 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3182 If TARGET is not a register, INSNS is simply emitted with no special
3183 processing. Likewise if anything in INSNS is not an INSN or if
3184 there is a libcall block inside INSNS.
3186 The final insn emitted is returned. */
3189 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
3195 rtx prev
, next
, first
, last
, insn
;
3197 if (GET_CODE (target
) != REG
|| reload_in_progress
)
3198 return emit_insn (insns
);
3200 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3201 if (GET_CODE (insn
) != INSN
3202 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3203 return emit_insn (insns
);
3205 /* First emit all insns that do not store into words of the output and remove
3206 these from the list. */
3207 for (insn
= insns
; insn
; insn
= next
)
3212 next
= NEXT_INSN (insn
);
3214 /* Some ports (cris) create an libcall regions at their own. We must
3215 avoid any potential nesting of LIBCALLs. */
3216 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3217 remove_note (insn
, note
);
3218 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3219 remove_note (insn
, note
);
3221 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3222 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3223 set
= PATTERN (insn
);
3224 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3226 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3227 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3229 set
= XVECEXP (PATTERN (insn
), 0, i
);
3237 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3239 if (PREV_INSN (insn
))
3240 NEXT_INSN (PREV_INSN (insn
)) = next
;
3245 PREV_INSN (next
) = PREV_INSN (insn
);
3251 prev
= get_last_insn ();
3253 /* Now write the CLOBBER of the output, followed by the setting of each
3254 of the words, followed by the final copy. */
3255 if (target
!= op0
&& target
!= op1
)
3256 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3258 for (insn
= insns
; insn
; insn
= next
)
3260 next
= NEXT_INSN (insn
);
3263 if (op1
&& GET_CODE (op1
) == REG
)
3264 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3267 if (op0
&& GET_CODE (op0
) == REG
)
3268 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3272 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3273 != CODE_FOR_nothing
)
3275 last
= emit_move_insn (target
, target
);
3277 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3281 last
= get_last_insn ();
3283 /* Remove any existing REG_EQUAL note from "last", or else it will
3284 be mistaken for a note referring to the full contents of the
3285 alleged libcall value when found together with the REG_RETVAL
3286 note added below. An existing note can come from an insn
3287 expansion at "last". */
3288 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3292 first
= get_insns ();
3294 first
= NEXT_INSN (prev
);
3296 /* Encapsulate the block so it gets manipulated as a unit. */
3297 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3299 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3304 /* Emit code to make a call to a constant function or a library call.
3306 INSNS is a list containing all insns emitted in the call.
3307 These insns leave the result in RESULT. Our block is to copy RESULT
3308 to TARGET, which is logically equivalent to EQUIV.
3310 We first emit any insns that set a pseudo on the assumption that these are
3311 loading constants into registers; doing so allows them to be safely cse'ed
3312 between blocks. Then we emit all the other insns in the block, followed by
3313 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3314 note with an operand of EQUIV.
3316 Moving assignments to pseudos outside of the block is done to improve
3317 the generated code, but is not required to generate correct code,
3318 hence being unable to move an assignment is not grounds for not making
3319 a libcall block. There are two reasons why it is safe to leave these
3320 insns inside the block: First, we know that these pseudos cannot be
3321 used in generated RTL outside the block since they are created for
3322 temporary purposes within the block. Second, CSE will not record the
3323 values of anything set inside a libcall block, so we know they must
3324 be dead at the end of the block.
3326 Except for the first group of insns (the ones setting pseudos), the
3327 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3330 emit_libcall_block (insns
, target
, result
, equiv
)
3336 rtx final_dest
= target
;
3337 rtx prev
, next
, first
, last
, insn
;
3339 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3340 into a MEM later. Protect the libcall block from this change. */
3341 if (! REG_P (target
) || REG_USERVAR_P (target
))
3342 target
= gen_reg_rtx (GET_MODE (target
));
3344 /* If we're using non-call exceptions, a libcall corresponding to an
3345 operation that may trap may also trap. */
3346 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3348 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3349 if (GET_CODE (insn
) == CALL_INSN
)
3351 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3353 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3354 remove_note (insn
, note
);
3358 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3359 reg note to indicate that this call cannot throw or execute a nonlocal
3360 goto (unless there is already a REG_EH_REGION note, in which case
3362 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3363 if (GET_CODE (insn
) == CALL_INSN
)
3365 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3368 XEXP (note
, 0) = GEN_INT (-1);
3370 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, GEN_INT (-1),
3374 /* First emit all insns that set pseudos. Remove them from the list as
3375 we go. Avoid insns that set pseudos which were referenced in previous
3376 insns. These can be generated by move_by_pieces, for example,
3377 to update an address. Similarly, avoid insns that reference things
3378 set in previous insns. */
3380 for (insn
= insns
; insn
; insn
= next
)
3382 rtx set
= single_set (insn
);
3385 /* Some ports (cris) create an libcall regions at their own. We must
3386 avoid any potential nesting of LIBCALLs. */
3387 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3388 remove_note (insn
, note
);
3389 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3390 remove_note (insn
, note
);
3392 next
= NEXT_INSN (insn
);
3394 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
3395 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3397 || ((! INSN_P(insns
)
3398 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3399 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3400 && ! modified_in_p (SET_SRC (set
), insns
)
3401 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3403 if (PREV_INSN (insn
))
3404 NEXT_INSN (PREV_INSN (insn
)) = next
;
3409 PREV_INSN (next
) = PREV_INSN (insn
);
3415 prev
= get_last_insn ();
3417 /* Write the remaining insns followed by the final copy. */
3419 for (insn
= insns
; insn
; insn
= next
)
3421 next
= NEXT_INSN (insn
);
3426 last
= emit_move_insn (target
, result
);
3427 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3428 != CODE_FOR_nothing
)
3429 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3432 /* Remove any existing REG_EQUAL note from "last", or else it will
3433 be mistaken for a note referring to the full contents of the
3434 libcall value when found together with the REG_RETVAL note added
3435 below. An existing note can come from an insn expansion at
3437 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3440 if (final_dest
!= target
)
3441 emit_move_insn (final_dest
, target
);
3444 first
= get_insns ();
3446 first
= NEXT_INSN (prev
);
3448 /* Encapsulate the block so it gets manipulated as a unit. */
3449 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3451 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3452 when the encapsulated region would not be in one basic block,
3453 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3455 bool attach_libcall_retval_notes
= true;
3456 next
= NEXT_INSN (last
);
3457 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3458 if (control_flow_insn_p (insn
))
3460 attach_libcall_retval_notes
= false;
3464 if (attach_libcall_retval_notes
)
3466 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3468 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3474 /* Generate code to store zero in X. */
3480 emit_move_insn (x
, const0_rtx
);
3483 /* Generate code to store 1 in X
3484 assuming it contains zero beforehand. */
3487 emit_0_to_1_insn (x
)
3490 emit_move_insn (x
, const1_rtx
);
3493 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3494 PURPOSE describes how this comparison will be used. CODE is the rtx
3495 comparison code we will be using.
3497 ??? Actually, CODE is slightly weaker than that. A target is still
3498 required to implement all of the normal bcc operations, but not
3499 required to implement all (or any) of the unordered bcc operations. */
3502 can_compare_p (code
, mode
, purpose
)
3504 enum machine_mode mode
;
3505 enum can_compare_purpose purpose
;
3509 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3511 if (purpose
== ccp_jump
)
3512 return bcc_gen_fctn
[(int) code
] != NULL
;
3513 else if (purpose
== ccp_store_flag
)
3514 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3516 /* There's only one cmov entry point, and it's allowed to fail. */
3519 if (purpose
== ccp_jump
3520 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3522 if (purpose
== ccp_cmov
3523 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3525 if (purpose
== ccp_store_flag
3526 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3529 mode
= GET_MODE_WIDER_MODE (mode
);
3531 while (mode
!= VOIDmode
);
3536 /* This function is called when we are going to emit a compare instruction that
3537 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3539 *PMODE is the mode of the inputs (in case they are const_int).
3540 *PUNSIGNEDP nonzero says that the operands are unsigned;
3541 this matters if they need to be widened.
3543 If they have mode BLKmode, then SIZE specifies the size of both operands.
3545 This function performs all the setup necessary so that the caller only has
3546 to emit a single comparison insn. This setup can involve doing a BLKmode
3547 comparison or emitting a library call to perform the comparison if no insn
3548 is available to handle it.
3549 The values which are passed in through pointers can be modified; the caller
3550 should perform the comparison on the modified values. */
3553 prepare_cmp_insn (px
, py
, pcomparison
, size
, pmode
, punsignedp
, purpose
)
3555 enum rtx_code
*pcomparison
;
3557 enum machine_mode
*pmode
;
3559 enum can_compare_purpose purpose
;
3561 enum machine_mode mode
= *pmode
;
3562 rtx x
= *px
, y
= *py
;
3563 int unsignedp
= *punsignedp
;
3564 enum mode_class
class;
3566 class = GET_MODE_CLASS (mode
);
3568 /* They could both be VOIDmode if both args are immediate constants,
3569 but we should fold that at an earlier stage.
3570 With no special code here, this will call abort,
3571 reminding the programmer to implement such folding. */
3573 if (mode
!= BLKmode
&& flag_force_mem
)
3575 x
= force_not_mem (x
);
3576 y
= force_not_mem (y
);
3579 /* If we are inside an appropriately-short loop and one operand is an
3580 expensive constant, force it into a register. */
3581 if (CONSTANT_P (x
) && preserve_subexpressions_p ()
3582 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3583 x
= force_reg (mode
, x
);
3585 if (CONSTANT_P (y
) && preserve_subexpressions_p ()
3586 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3587 y
= force_reg (mode
, y
);
3590 /* Abort if we have a non-canonical comparison. The RTL documentation
3591 states that canonical comparisons are required only for targets which
3593 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3597 /* Don't let both operands fail to indicate the mode. */
3598 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3599 x
= force_reg (mode
, x
);
3601 /* Handle all BLKmode compares. */
3603 if (mode
== BLKmode
)
3606 enum machine_mode result_mode
;
3607 rtx opalign ATTRIBUTE_UNUSED
3608 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3611 x
= protect_from_queue (x
, 0);
3612 y
= protect_from_queue (y
, 0);
3616 #ifdef HAVE_cmpstrqi
3618 && GET_CODE (size
) == CONST_INT
3619 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
3621 result_mode
= insn_data
[(int) CODE_FOR_cmpstrqi
].operand
[0].mode
;
3622 result
= gen_reg_rtx (result_mode
);
3623 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, opalign
));
3627 #ifdef HAVE_cmpstrhi
3629 && GET_CODE (size
) == CONST_INT
3630 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
3632 result_mode
= insn_data
[(int) CODE_FOR_cmpstrhi
].operand
[0].mode
;
3633 result
= gen_reg_rtx (result_mode
);
3634 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, opalign
));
3638 #ifdef HAVE_cmpstrsi
3641 result_mode
= insn_data
[(int) CODE_FOR_cmpstrsi
].operand
[0].mode
;
3642 result
= gen_reg_rtx (result_mode
);
3643 size
= protect_from_queue (size
, 0);
3644 emit_insn (gen_cmpstrsi (result
, x
, y
,
3645 convert_to_mode (SImode
, size
, 1),
3651 #ifdef TARGET_MEM_FUNCTIONS
3652 result
= emit_library_call_value (memcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3653 TYPE_MODE (integer_type_node
), 3,
3654 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3655 convert_to_mode (TYPE_MODE (sizetype
), size
,
3656 TREE_UNSIGNED (sizetype
)),
3657 TYPE_MODE (sizetype
));
3659 result
= emit_library_call_value (bcmp_libfunc
, NULL_RTX
, LCT_PURE_MAKE_BLOCK
,
3660 TYPE_MODE (integer_type_node
), 3,
3661 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
3662 convert_to_mode (TYPE_MODE (integer_type_node
),
3664 TREE_UNSIGNED (integer_type_node
)),
3665 TYPE_MODE (integer_type_node
));
3668 result_mode
= TYPE_MODE (integer_type_node
);
3672 *pmode
= result_mode
;
3678 if (can_compare_p (*pcomparison
, mode
, purpose
))
3681 /* Handle a lib call just for the mode we are using. */
3683 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3685 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3688 /* If we want unsigned, and this mode has a distinct unsigned
3689 comparison routine, use that. */
3690 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3691 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3693 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3694 word_mode
, 2, x
, mode
, y
, mode
);
3696 /* Integer comparison returns a result that must be compared against 1,
3697 so that even if we do an unsigned compare afterward,
3698 there is still a value that can represent the result "less than". */
3705 if (class == MODE_FLOAT
)
3706 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3712 /* Before emitting an insn with code ICODE, make sure that X, which is going
3713 to be used for operand OPNUM of the insn, is converted from mode MODE to
3714 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3715 that it is accepted by the operand predicate. Return the new value. */
3718 prepare_operand (icode
, x
, opnum
, mode
, wider_mode
, unsignedp
)
3722 enum machine_mode mode
, wider_mode
;
3725 x
= protect_from_queue (x
, 0);
3727 if (mode
!= wider_mode
)
3728 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3730 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3731 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3732 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3736 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3737 we can do the comparison.
3738 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3739 be NULL_RTX which indicates that only a comparison is to be generated. */
3742 emit_cmp_and_jump_insn_1 (x
, y
, mode
, comparison
, unsignedp
, label
)
3744 enum machine_mode mode
;
3745 enum rtx_code comparison
;
3749 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3750 enum mode_class
class = GET_MODE_CLASS (mode
);
3751 enum machine_mode wider_mode
= mode
;
3753 /* Try combined insns first. */
3756 enum insn_code icode
;
3757 PUT_MODE (test
, wider_mode
);
3761 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3763 if (icode
!= CODE_FOR_nothing
3764 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3766 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3767 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3768 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3773 /* Handle some compares against zero. */
3774 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3775 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3777 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3778 emit_insn (GEN_FCN (icode
) (x
));
3780 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3784 /* Handle compares for which there is a directly suitable insn. */
3786 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3787 if (icode
!= CODE_FOR_nothing
)
3789 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3790 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3791 emit_insn (GEN_FCN (icode
) (x
, y
));
3793 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3797 if (class != MODE_INT
&& class != MODE_FLOAT
3798 && class != MODE_COMPLEX_FLOAT
)
3801 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3803 while (wider_mode
!= VOIDmode
);
3808 /* Generate code to compare X with Y so that the condition codes are
3809 set and to jump to LABEL if the condition is true. If X is a
3810 constant and Y is not a constant, then the comparison is swapped to
3811 ensure that the comparison RTL has the canonical form.
3813 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3814 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3815 the proper branch condition code.
3817 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3819 MODE is the mode of the inputs (in case they are const_int).
3821 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3822 be passed unchanged to emit_cmp_insn, then potentially converted into an
3823 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3826 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, label
)
3828 enum rtx_code comparison
;
3830 enum machine_mode mode
;
3834 rtx op0
= x
, op1
= y
;
3836 /* Swap operands and condition to ensure canonical RTL. */
3837 if (swap_commutative_operands_p (x
, y
))
3839 /* If we're not emitting a branch, this means some caller
3845 comparison
= swap_condition (comparison
);
3849 /* If OP0 is still a constant, then both X and Y must be constants. Force
3850 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3852 if (CONSTANT_P (op0
))
3853 op0
= force_reg (mode
, op0
);
3858 comparison
= unsigned_condition (comparison
);
3860 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3862 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3865 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3868 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
)
3870 enum rtx_code comparison
;
3872 enum machine_mode mode
;
3875 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3878 /* Emit a library call comparison between floating point X and Y.
3879 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3882 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
)
3884 enum rtx_code
*pcomparison
;
3885 enum machine_mode
*pmode
;
3888 enum rtx_code comparison
= *pcomparison
;
3890 rtx x
= *px
= protect_from_queue (*px
, 0);
3891 rtx y
= *py
= protect_from_queue (*py
, 0);
3892 enum machine_mode mode
= GET_MODE (x
);
3900 libfunc
= eqhf2_libfunc
;
3904 libfunc
= nehf2_libfunc
;
3908 libfunc
= gthf2_libfunc
;
3909 if (libfunc
== NULL_RTX
)
3911 tmp
= x
; x
= y
; y
= tmp
;
3913 libfunc
= lthf2_libfunc
;
3918 libfunc
= gehf2_libfunc
;
3919 if (libfunc
== NULL_RTX
)
3921 tmp
= x
; x
= y
; y
= tmp
;
3923 libfunc
= lehf2_libfunc
;
3928 libfunc
= lthf2_libfunc
;
3929 if (libfunc
== NULL_RTX
)
3931 tmp
= x
; x
= y
; y
= tmp
;
3933 libfunc
= gthf2_libfunc
;
3938 libfunc
= lehf2_libfunc
;
3939 if (libfunc
== NULL_RTX
)
3941 tmp
= x
; x
= y
; y
= tmp
;
3943 libfunc
= gehf2_libfunc
;
3948 libfunc
= unordhf2_libfunc
;
3954 else if (mode
== SFmode
)
3958 libfunc
= eqsf2_libfunc
;
3962 libfunc
= nesf2_libfunc
;
3966 libfunc
= gtsf2_libfunc
;
3967 if (libfunc
== NULL_RTX
)
3969 tmp
= x
; x
= y
; y
= tmp
;
3971 libfunc
= ltsf2_libfunc
;
3976 libfunc
= gesf2_libfunc
;
3977 if (libfunc
== NULL_RTX
)
3979 tmp
= x
; x
= y
; y
= tmp
;
3981 libfunc
= lesf2_libfunc
;
3986 libfunc
= ltsf2_libfunc
;
3987 if (libfunc
== NULL_RTX
)
3989 tmp
= x
; x
= y
; y
= tmp
;
3991 libfunc
= gtsf2_libfunc
;
3996 libfunc
= lesf2_libfunc
;
3997 if (libfunc
== NULL_RTX
)
3999 tmp
= x
; x
= y
; y
= tmp
;
4001 libfunc
= gesf2_libfunc
;
4006 libfunc
= unordsf2_libfunc
;
4012 else if (mode
== DFmode
)
4016 libfunc
= eqdf2_libfunc
;
4020 libfunc
= nedf2_libfunc
;
4024 libfunc
= gtdf2_libfunc
;
4025 if (libfunc
== NULL_RTX
)
4027 tmp
= x
; x
= y
; y
= tmp
;
4029 libfunc
= ltdf2_libfunc
;
4034 libfunc
= gedf2_libfunc
;
4035 if (libfunc
== NULL_RTX
)
4037 tmp
= x
; x
= y
; y
= tmp
;
4039 libfunc
= ledf2_libfunc
;
4044 libfunc
= ltdf2_libfunc
;
4045 if (libfunc
== NULL_RTX
)
4047 tmp
= x
; x
= y
; y
= tmp
;
4049 libfunc
= gtdf2_libfunc
;
4054 libfunc
= ledf2_libfunc
;
4055 if (libfunc
== NULL_RTX
)
4057 tmp
= x
; x
= y
; y
= tmp
;
4059 libfunc
= gedf2_libfunc
;
4064 libfunc
= unorddf2_libfunc
;
4070 else if (mode
== XFmode
)
4074 libfunc
= eqxf2_libfunc
;
4078 libfunc
= nexf2_libfunc
;
4082 libfunc
= gtxf2_libfunc
;
4083 if (libfunc
== NULL_RTX
)
4085 tmp
= x
; x
= y
; y
= tmp
;
4087 libfunc
= ltxf2_libfunc
;
4092 libfunc
= gexf2_libfunc
;
4093 if (libfunc
== NULL_RTX
)
4095 tmp
= x
; x
= y
; y
= tmp
;
4097 libfunc
= lexf2_libfunc
;
4102 libfunc
= ltxf2_libfunc
;
4103 if (libfunc
== NULL_RTX
)
4105 tmp
= x
; x
= y
; y
= tmp
;
4107 libfunc
= gtxf2_libfunc
;
4112 libfunc
= lexf2_libfunc
;
4113 if (libfunc
== NULL_RTX
)
4115 tmp
= x
; x
= y
; y
= tmp
;
4117 libfunc
= gexf2_libfunc
;
4122 libfunc
= unordxf2_libfunc
;
4128 else if (mode
== TFmode
)
4132 libfunc
= eqtf2_libfunc
;
4136 libfunc
= netf2_libfunc
;
4140 libfunc
= gttf2_libfunc
;
4141 if (libfunc
== NULL_RTX
)
4143 tmp
= x
; x
= y
; y
= tmp
;
4145 libfunc
= lttf2_libfunc
;
4150 libfunc
= getf2_libfunc
;
4151 if (libfunc
== NULL_RTX
)
4153 tmp
= x
; x
= y
; y
= tmp
;
4155 libfunc
= letf2_libfunc
;
4160 libfunc
= lttf2_libfunc
;
4161 if (libfunc
== NULL_RTX
)
4163 tmp
= x
; x
= y
; y
= tmp
;
4165 libfunc
= gttf2_libfunc
;
4170 libfunc
= letf2_libfunc
;
4171 if (libfunc
== NULL_RTX
)
4173 tmp
= x
; x
= y
; y
= tmp
;
4175 libfunc
= getf2_libfunc
;
4180 libfunc
= unordtf2_libfunc
;
4188 enum machine_mode wider_mode
;
4190 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
4191 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
4193 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
4194 != CODE_FOR_nothing
)
4195 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
4197 x
= protect_from_queue (x
, 0);
4198 y
= protect_from_queue (y
, 0);
4199 *px
= convert_to_mode (wider_mode
, x
, 0);
4200 *py
= convert_to_mode (wider_mode
, y
, 0);
4201 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
4211 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
4212 word_mode
, 2, x
, mode
, y
, mode
);
4216 if (comparison
== UNORDERED
)
4218 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
4219 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4225 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4228 emit_indirect_jump (loc
)
4231 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
4233 loc
= copy_to_mode_reg (Pmode
, loc
);
4235 emit_jump_insn (gen_indirect_jump (loc
));
4239 #ifdef HAVE_conditional_move
4241 /* Emit a conditional move instruction if the machine supports one for that
4242 condition and machine mode.
4244 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4245 the mode to use should they be constants. If it is VOIDmode, they cannot
4248 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4249 should be stored there. MODE is the mode to use should they be constants.
4250 If it is VOIDmode, they cannot both be constants.
4252 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4253 is not supported. */
4256 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
4261 enum machine_mode cmode
;
4263 enum machine_mode mode
;
4266 rtx tem
, subtarget
, comparison
, insn
;
4267 enum insn_code icode
;
4268 enum rtx_code reversed
;
4270 /* If one operand is constant, make it the second one. Only do this
4271 if the other operand is not constant as well. */
4273 if (swap_commutative_operands_p (op0
, op1
))
4278 code
= swap_condition (code
);
4281 /* get_condition will prefer to generate LT and GT even if the old
4282 comparison was against zero, so undo that canonicalization here since
4283 comparisons against zero are cheaper. */
4284 if (code
== LT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == 1)
4285 code
= LE
, op1
= const0_rtx
;
4286 else if (code
== GT
&& GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) == -1)
4287 code
= GE
, op1
= const0_rtx
;
4289 if (cmode
== VOIDmode
)
4290 cmode
= GET_MODE (op0
);
4292 if (swap_commutative_operands_p (op2
, op3
)
4293 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4302 if (mode
== VOIDmode
)
4303 mode
= GET_MODE (op2
);
4305 icode
= movcc_gen_code
[mode
];
4307 if (icode
== CODE_FOR_nothing
)
4312 op2
= force_not_mem (op2
);
4313 op3
= force_not_mem (op3
);
4317 target
= protect_from_queue (target
, 1);
4319 target
= gen_reg_rtx (mode
);
4325 op2
= protect_from_queue (op2
, 0);
4326 op3
= protect_from_queue (op3
, 0);
4328 /* If the insn doesn't accept these operands, put them in pseudos. */
4330 if (! (*insn_data
[icode
].operand
[0].predicate
)
4331 (subtarget
, insn_data
[icode
].operand
[0].mode
))
4332 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4334 if (! (*insn_data
[icode
].operand
[2].predicate
)
4335 (op2
, insn_data
[icode
].operand
[2].mode
))
4336 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4338 if (! (*insn_data
[icode
].operand
[3].predicate
)
4339 (op3
, insn_data
[icode
].operand
[3].mode
))
4340 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4342 /* Everything should now be in the suitable form, so emit the compare insn
4343 and then the conditional move. */
4346 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4348 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4349 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4350 return NULL and let the caller figure out how best to deal with this
4352 if (GET_CODE (comparison
) != code
)
4355 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4357 /* If that failed, then give up. */
4363 if (subtarget
!= target
)
4364 convert_move (target
, subtarget
, 0);
4369 /* Return nonzero if a conditional move of mode MODE is supported.
4371 This function is for combine so it can tell whether an insn that looks
4372 like a conditional move is actually supported by the hardware. If we
4373 guess wrong we lose a bit on optimization, but that's it. */
4374 /* ??? sparc64 supports conditionally moving integers values based on fp
4375 comparisons, and vice versa. How do we handle them? */
4378 can_conditionally_move_p (mode
)
4379 enum machine_mode mode
;
4381 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
4387 #endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (target, code, op0, op1, cmode, op2, op3, mode,
                      unsignedp)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode cmode;
     rtx op2, op3;
     enum machine_mode mode;
     int unsignedp;
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == 1)
    code = LE, op1 = const0_rtx;
  else if (code == GT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == -1)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  op2 = force_not_mem (op2);
  op3 = force_not_mem (op3);

  if (target)
    target = protect_from_queue (target, 1);
  else
    target = gen_reg_rtx (mode);

  subtarget = target;

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);
  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
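/* As an illustration of the interface above: with pseudo-registers A, B, C
   and D all in SImode, a call roughly like

       emit_conditional_add (target, GE, a, b, VOIDmode, c, d, SImode, 0)

   asks for target = (a >= b) ? c : c + d, and yields NULL_RTX when the
   machine has no usable addcc pattern for SImode.  The operand names are
   made up for the example.  */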
/* These functions generate an insn body and return it
   rather than emitting the insn.

   They do not protect from queued increments,
   because they may be used 1) in protect_from_queue itself
   and 2) in other passes where there is no queue.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (x, y)
     rtx x, y;
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (r0, r1, c)
     rtx r0, r1, c;
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_add2_insn (x, y)
     rtx x, y;
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
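/* A typical use pairs the two helpers above, along these lines:

       if (have_add2_insn (x, y))
         emit_insn (gen_add2_insn (x, y));
       else
         ... fall back to some other strategy ...

   X and Y must already satisfy the add pattern's operand predicates,
   since gen_add2_insn does no copying or conversion of its own.  */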
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (x, y)
     rtx x, y;
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (r0, r1, c)
     rtx r0, r1, c;
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_sub2_insn (x, y)
     rtx x, y;
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (x, y)
     rtx x, y;
{
  enum machine_mode mode = GET_MODE (x);
  enum insn_code insn_code;
  rtx seq;

  if (mode == VOIDmode)
    mode = GET_MODE (y);

  insn_code = mov_optab->handlers[(int) mode].insn_code;

  /* Handle MODE_CC modes:  If we don't have a special move insn for this mode,
     find a mode to do it in.  If we have a movcc, use it.  Otherwise,
     find the MODE_INT mode of the same width.  */

  if (GET_MODE_CLASS (mode) == MODE_CC && insn_code == CODE_FOR_nothing)
    {
      enum machine_mode tmode = VOIDmode;
      rtx x1 = x, y1 = y;

      if (mode != CCmode
          && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
        tmode = CCmode;
      else
        for (tmode = QImode; tmode != VOIDmode;
             tmode = GET_MODE_WIDER_MODE (tmode))
          if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
            break;

      if (tmode == VOIDmode)
        abort ();

      /* Get X and Y in TMODE.  We can't use gen_lowpart here because it
         may call change_address which is not appropriate if we were
         called when a reload was in progress.  We don't have to worry
         about changing the address since the size in bytes is supposed to
         be the same.  Copy the MEM to change the mode and move any
         substitutions from the old MEM to the new one.  */

      if (reload_in_progress)
        {
          x = gen_lowpart_common (tmode, x1);
          if (x == 0 && GET_CODE (x1) == MEM)
            {
              x = adjust_address_nv (x1, tmode, 0);
              copy_replacements (x1, x);
            }

          y = gen_lowpart_common (tmode, y1);
          if (y == 0 && GET_CODE (y1) == MEM)
            {
              y = adjust_address_nv (y1, tmode, 0);
              copy_replacements (y1, y);
            }
        }
      else
        {
          x = gen_lowpart (tmode, x);
          y = gen_lowpart (tmode, y);
        }

      insn_code = mov_optab->handlers[(int) tmode].insn_code;
      return (GEN_FCN (insn_code) (x, y));
    }

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (to_mode, from_mode, unsignedp)
     enum machine_mode to_mode, from_mode;
     int unsignedp;
{
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (x, y, mto, mfrom, unsignedp)
     rtx x, y;
     enum machine_mode mto, mfrom;
     int unsignedp;
{
  return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));
}
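/* For instance, can_extend_p (DImode, SImode, 1) yields the insn code of
   the target's zero-extend-SImode-to-DImode pattern, or CODE_FOR_nothing
   if there is none, and

       emit_insn (gen_extend_insn (x, y, DImode, SImode, 1));

   would emit that extension, assuming X is a DImode destination and Y an
   SImode source.  The particular modes are only an example.  */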
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr)
     enum machine_mode fltmode, fixmode;
     int unsignedp;
     int *truncp_ptr;
{
  *truncp_ptr = 0;
  if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
      != CODE_FOR_nothing)
    return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];

  if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];
    }

  return CODE_FOR_nothing;
}

static enum insn_code
can_float_p (fltmode, fixmode, unsignedp)
     enum machine_mode fixmode, fltmode;
     int unsignedp;
{
  return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
}
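/* For example, can_fix_p (SImode, DFmode, 0, &must_trunc) on a target whose
   only DFmode-to-SImode conversion is a plain fix insn (no fix_trunc
   pattern) returns that insn code and sets must_trunc to 1, telling the
   caller to emit an explicit FTRUNC first.  The modes here are purely
   illustrative, and MUST_TRUNC is a caller-provided int.  */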
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */
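/* A worked example of that correction: floating the unsigned SImode value
   0xFFFFFFFF on a 32-bit target via a signed conversion first yields -1.0;
   adding 2**32 = 4294967296.0 then produces the intended 4294967295.0.  */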
void
expand_float (to, from, unsignedp)
     rtx to, from;
     int unsignedp;
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)
    abort ();

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
          icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (GET_CODE (target) != REG
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */
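              /* For instance, with a 32-bit FROM of 0xFFFFFFFD: FROM >> 1 is
                 0x7FFFFFFE and FROM & 1 is 1, so the value converted is
                 0x7FFFFFFF; the doubling below restores the magnitude, and
                 the OR-ed in low bit keeps a record of the shifted-out bit
                 so the float rounding still accounts for it.  */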
              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine to convert from
     SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode.  */
  {
    rtx libfcn;
    rtx insns;
    rtx value;

    to = protect_from_queue (to, 1);
    from = protect_from_queue (from, 0);

    if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
      from = convert_to_mode (SImode, from, unsignedp);

    from = force_not_mem (from);

    if (GET_MODE (to) == SFmode)
      {
        if (GET_MODE (from) == SImode)
          libfcn = floatsisf_libfunc;
        else if (GET_MODE (from) == DImode)
          libfcn = floatdisf_libfunc;
        else if (GET_MODE (from) == TImode)
          libfcn = floattisf_libfunc;
        else
          abort ();
      }
    else if (GET_MODE (to) == DFmode)
      {
        if (GET_MODE (from) == SImode)
          libfcn = floatsidf_libfunc;
        else if (GET_MODE (from) == DImode)
          libfcn = floatdidf_libfunc;
        else if (GET_MODE (from) == TImode)
          libfcn = floattidf_libfunc;
        else
          abort ();
      }
    else if (GET_MODE (to) == XFmode)
      {
        if (GET_MODE (from) == SImode)
          libfcn = floatsixf_libfunc;
        else if (GET_MODE (from) == DImode)
          libfcn = floatdixf_libfunc;
        else if (GET_MODE (from) == TImode)
          libfcn = floattixf_libfunc;
        else
          abort ();
      }
    else if (GET_MODE (to) == TFmode)
      {
        if (GET_MODE (from) == SImode)
          libfcn = floatsitf_libfunc;
        else if (GET_MODE (from) == DImode)
          libfcn = floatditf_libfunc;
        else if (GET_MODE (from) == TImode)
          libfcn = floattitf_libfunc;
        else
          abort ();
      }
    else
      abort ();

    start_sequence ();

    value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
                                     GET_MODE (to), 1, from,
                                     GET_MODE (from));
    insns = get_insns ();
    end_sequence ();

    emit_libcall_block (insns, target, value,
                        gen_rtx_FLOAT (GET_MODE (to), from));
  }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
/* expand_fix: generate code to convert FROM to fixed point
   and store in TO.  FROM must be floating point.  */

static rtx
ftruncify (x)
     rtx x;
{
  rtx temp = gen_reg_rtx (GET_MODE (x));
  return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
}

void
expand_fix (to, from, unsignedp)
     rtx to, from;
     int unsignedp;
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;
  rtx libfcn = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              from = ftruncify (from);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      /* Make sure we won't lose significant bits doing this.  */
      if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
          && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                            &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          to = protect_from_queue (to, 1);
          from = protect_from_queue (from, 0);

          from = force_not_mem (from);

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
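          /* E.g. fixing 3500000000.0 to a 32-bit unsigned integer: the value
             is >= 2**31, so we compute fix (3500000000.0 - 2147483648.0)
             = 1352516352 and then XOR in the sign bit, 1352516352 ^ 0x80000000
             = 3500000000, which equals adding 2**31 back because that bit is
             known to be clear after the subtraction.  */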
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else if (GET_MODE (from) == SFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == DFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == XFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
      else
        abort ();
    }
  else if (GET_MODE (from) == TFmode)
    {
      if (GET_MODE (to) == SImode)
        libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
      else if (GET_MODE (to) == DImode)
        libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
      else if (GET_MODE (to) == TImode)
        libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
      else
        abort ();
    }
  else
    abort ();

  if (libfcn)
    {
      rtx insns;
      rtx value;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (code, mode)
     enum rtx_code code;
     enum machine_mode mode;
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
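/* For example, an expander can check have_insn_for (SMIN, SImode) before
   trying to emit an smin pattern directly; the SMIN/SImode pair is only an
   illustration of the CODE/MODE arguments.  */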
/* Create a blank optab.  */

static optab
new_optab ()
{
  int i;
  optab op = (optab) ggc_alloc (sizeof (struct optab));

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static optab
init_optab (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static optab
init_optabv (code)
     enum rtx_code code;
{
  optab op = new_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lower case) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */
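/* So, for instance, init_integral_libfuncs (add_optab, "add", '3') below
   produces names like "__addsi3" and "__adddi3" on a 32-bit-word target,
   and init_floating_libfuncs (neg_optab, "neg", '2') produces "__negsf2",
   "__negdf2" and so on, matching the usual libgcc naming convention.  */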
static void
init_libfuncs (optable, first_mode, last_mode, opname, suffix)
     optab optable;
     int first_mode;
     int last_mode;
     const char *opname;
     int suffix;
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (libfunc_name,
                                                       p - libfunc_name));
    }
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  init_libfuncs (optable, word_mode,
                 mode_for_size (2*BITS_PER_WORD, MODE_INT, 0),
                 opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optable, opname, suffix)
     optab optable;
     const char *opname;
     int suffix;
{
  enum machine_mode fmode, dmode, lmode;

  fmode = float_type_node ? TYPE_MODE (float_type_node) : VOIDmode;
  dmode = double_type_node ? TYPE_MODE (double_type_node) : VOIDmode;
  lmode = long_double_type_node ? TYPE_MODE (long_double_type_node) : VOIDmode;

  if (fmode != VOIDmode)
    init_libfuncs (optable, fmode, fmode, opname, suffix);
  if (dmode != fmode && dmode != VOIDmode)
    init_libfuncs (optable, dmode, dmode, opname, suffix);
  if (lmode != dmode && lmode != VOIDmode)
    init_libfuncs (optable, lmode, lmode, opname, suffix);
}
rtx
init_one_libfunc (name)
     const char *name;
{
  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except that this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  /* Return the symbol_ref from the mem rtx.  */
  return XEXP (DECL_RTL (decl), 0);
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs ()
{
  unsigned int i, j, k;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < ARRAY_SIZE (fixtab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
        fixtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
    for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
        fixtrunctab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (floattab); i++)
    for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
        floattab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < ARRAY_SIZE (extendtab); i++)
    for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
      for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
        extendtab[i][j][k] = CODE_FOR_nothing;

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  trunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
  /* This flag says the same insns that convert to a signed fixnum
     also convert validly to an unsigned one.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
#endif

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

#ifdef MULSI3_LIBCALL
  smul_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MULSI3_LIBCALL);
#endif
#ifdef MULDI3_LIBCALL
  smul_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MULDI3_LIBCALL);
#endif

#ifdef DIVSI3_LIBCALL
  sdiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (DIVSI3_LIBCALL);
#endif
#ifdef DIVDI3_LIBCALL
  sdiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (DIVDI3_LIBCALL);
#endif

#ifdef UDIVSI3_LIBCALL
  udiv_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UDIVSI3_LIBCALL);
#endif
#ifdef UDIVDI3_LIBCALL
  udiv_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UDIVDI3_LIBCALL);
#endif

#ifdef MODSI3_LIBCALL
  smod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (MODSI3_LIBCALL);
#endif
#ifdef MODDI3_LIBCALL
  smod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (MODDI3_LIBCALL);
#endif

#ifdef UMODSI3_LIBCALL
  umod_optab->handlers[(int) SImode].libfunc
    = init_one_libfunc (UMODSI3_LIBCALL);
#endif
#ifdef UMODDI3_LIBCALL
  umod_optab->handlers[(int) DImode].libfunc
    = init_one_libfunc (UMODDI3_LIBCALL);
#endif

  /* Use cabs for DC complex abs, since systems generally have cabs.
     Don't define any libcall for SCmode, so that cabs will be used.  */
  abs_optab->handlers[(int) DCmode].libfunc
    = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
  extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
  extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
  extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
  extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");

  truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
  truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
  trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
  truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
  trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
                                            ? "_Unwind_SjLj_Resume"
                                            : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  eqhf2_libfunc = init_one_libfunc ("__eqhf2");
  nehf2_libfunc = init_one_libfunc ("__nehf2");
  gthf2_libfunc = init_one_libfunc ("__gthf2");
  gehf2_libfunc = init_one_libfunc ("__gehf2");
  lthf2_libfunc = init_one_libfunc ("__lthf2");
  lehf2_libfunc = init_one_libfunc ("__lehf2");
  unordhf2_libfunc = init_one_libfunc ("__unordhf2");

  eqsf2_libfunc = init_one_libfunc ("__eqsf2");
  nesf2_libfunc = init_one_libfunc ("__nesf2");
  gtsf2_libfunc = init_one_libfunc ("__gtsf2");
  gesf2_libfunc = init_one_libfunc ("__gesf2");
  ltsf2_libfunc = init_one_libfunc ("__ltsf2");
  lesf2_libfunc = init_one_libfunc ("__lesf2");
  unordsf2_libfunc = init_one_libfunc ("__unordsf2");

  eqdf2_libfunc = init_one_libfunc ("__eqdf2");
  nedf2_libfunc = init_one_libfunc ("__nedf2");
  gtdf2_libfunc = init_one_libfunc ("__gtdf2");
  gedf2_libfunc = init_one_libfunc ("__gedf2");
  ltdf2_libfunc = init_one_libfunc ("__ltdf2");
  ledf2_libfunc = init_one_libfunc ("__ledf2");
  unorddf2_libfunc = init_one_libfunc ("__unorddf2");

  eqxf2_libfunc = init_one_libfunc ("__eqxf2");
  nexf2_libfunc = init_one_libfunc ("__nexf2");
  gtxf2_libfunc = init_one_libfunc ("__gtxf2");
  gexf2_libfunc = init_one_libfunc ("__gexf2");
  ltxf2_libfunc = init_one_libfunc ("__ltxf2");
  lexf2_libfunc = init_one_libfunc ("__lexf2");
  unordxf2_libfunc = init_one_libfunc ("__unordxf2");

  eqtf2_libfunc = init_one_libfunc ("__eqtf2");
  netf2_libfunc = init_one_libfunc ("__netf2");
  gttf2_libfunc = init_one_libfunc ("__gttf2");
  getf2_libfunc = init_one_libfunc ("__getf2");
  lttf2_libfunc = init_one_libfunc ("__lttf2");
  letf2_libfunc = init_one_libfunc ("__letf2");
  unordtf2_libfunc = init_one_libfunc ("__unordtf2");

  floatsisf_libfunc = init_one_libfunc ("__floatsisf");
  floatdisf_libfunc = init_one_libfunc ("__floatdisf");
  floattisf_libfunc = init_one_libfunc ("__floattisf");

  floatsidf_libfunc = init_one_libfunc ("__floatsidf");
  floatdidf_libfunc = init_one_libfunc ("__floatdidf");
  floattidf_libfunc = init_one_libfunc ("__floattidf");

  floatsixf_libfunc = init_one_libfunc ("__floatsixf");
  floatdixf_libfunc = init_one_libfunc ("__floatdixf");
  floattixf_libfunc = init_one_libfunc ("__floattixf");

  floatsitf_libfunc = init_one_libfunc ("__floatsitf");
  floatditf_libfunc = init_one_libfunc ("__floatditf");
  floattitf_libfunc = init_one_libfunc ("__floattitf");

  fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
  fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
  fixsfti_libfunc = init_one_libfunc ("__fixsfti");

  fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
  fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
  fixdfti_libfunc = init_one_libfunc ("__fixdfti");

  fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
  fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
  fixxfti_libfunc = init_one_libfunc ("__fixxfti");

  fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
  fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
  fixtfti_libfunc = init_one_libfunc ("__fixtfti");

  fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
  fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
  fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");

  fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
  fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
  fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");

  fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
  fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
  fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");

  fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
  fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
  fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

#ifdef HAVE_conditional_trap
  init_traps ();
#endif

#ifdef INIT_TARGET_OPTABS
  /* Allow the target to add more libcalls or rename some, etc.  */
  INIT_TARGET_OPTABS;
#endif
}
static GTY(()) rtx trap_rtx;

#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are
   ignored.  */

static void
init_traps ()
{
  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
}
#endif

/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (code, op1, op2, tcode)
     enum rtx_code code ATTRIBUTE_UNUSED;
     rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED;
{
  enum machine_mode mode = GET_MODE (op1);

  if (mode == VOIDmode)
    return 0;

#ifdef HAVE_conditional_trap
  if (HAVE_conditional_trap
      && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx insn;

      start_sequence ();
      emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code) (op1, op2));
      PUT_CODE (trap_rtx, code);
      insn = gen_conditional_trap (trap_rtx, tcode);
      if (insn)
        {
          emit_insn (insn);
          insn = get_insns ();
        }
      end_sequence ();
      return insn;
    }
#endif

  return 0;
}

#include "gt-optabs.h"