/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif
/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
			  int);
static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx,
				     enum machine_mode, int,
				     enum optab_methods, enum mode_class,
				     optab);
static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx,
				 enum machine_mode, int, enum optab_methods,
				 enum mode_class, optab);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
			      enum machine_mode *, int *,
			      enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
				 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static rtx ftruncify (rtx);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
					   enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
					   enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
				      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
				   enum machine_mode *, int *);
static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
				enum optab_methods);
static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  if (! insns
      || ! INSN_P (insns)
      || NEXT_INSN (insns) == NULL_RTX)
    abort ();

  if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
      && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
	{
	  if (reg_set_p (target, insn))
	    return 0;

	  insn = PREV_INSN (insn);
	}
    }

  if (GET_RTX_CLASS (code) == '1')
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
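/* For example, an AND of two QImode values carried out in SImode cannot be
   affected by whatever happens to sit in the upper 24 bits of the inputs,
   so NO_EXTEND may be nonzero there; for a right shift those upper bits are
   shifted down into the result, so a real extension is required.  */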
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Generate code to perform a straightforward complex divide.  */
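/* This is the textbook evaluation

     (a+ib) / (c+id) = ((ac+bd) + i(bc-ad)) / (cc+dd)

   (see the corresponding comment in expand_binop below).  It uses fewer
   operations than the wide-range variant but forms cc+dd directly, so it
   can overflow or underflow spuriously for large or small inputs.  */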
expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1,
			  rtx realr, rtx imagr, enum machine_mode submode,
			  int unsignedp, enum optab_methods methods,
			  enum mode_class class, optab binoptab)
  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
    imag0 = force_reg (submode, imag0);
    imag1 = force_reg (submode, imag1);

  /* Divisor: c*c + d*d.  */
  temp1 = expand_binop (submode, this_mul_optab, real1, real1,
			NULL_RTX, unsignedp, methods);
  temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
			NULL_RTX, unsignedp, methods);
  if (temp1 == 0 || temp2 == 0)

  divisor = expand_binop (submode, this_add_optab, temp1, temp2,
			  NULL_RTX, unsignedp, methods);

      /* Mathematically, ((a)(c-id))/divisor.  */
      /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)).  */

      /* Calculate the dividend.  */
      real_t = expand_binop (submode, this_mul_optab, real0, real1,
			     NULL_RTX, unsignedp, methods);
      imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
			     NULL_RTX, unsignedp, methods);
      if (real_t == 0 || imag_t == 0)

      imag_t = expand_unop (submode, this_neg_optab, imag_t,
			    NULL_RTX, unsignedp);

      /* Mathematically, ((a+ib)(c-id))/divider.  */
      /* Calculate the dividend.  */
      temp1 = expand_binop (submode, this_mul_optab, real0, real1,
			    NULL_RTX, unsignedp, methods);
      temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
			    NULL_RTX, unsignedp, methods);
      if (temp1 == 0 || temp2 == 0)

      real_t = expand_binop (submode, this_add_optab, temp1, temp2,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
			    NULL_RTX, unsignedp, methods);
      temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
			    NULL_RTX, unsignedp, methods);
      if (temp1 == 0 || temp2 == 0)

      imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);
    emit_move_insn (imagr, res);
/* Generate code to perform a wide-input-range-acceptable complex divide.  */
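/* Instead of forming cc+dd directly, this variant first compares |c| and
   |d| and scales by the ratio whose magnitude is at most 1 (essentially
   Smith's method), avoiding the spurious overflow and underflow of the
   straightforward formula.  For example, when |c| >= |d| and r = d/c:

     (a+ib) / (c+id) = ((a + b*r) + i(b - a*r)) / (c + d*r)  */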
expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr,
		      rtx imagr, enum machine_mode submode, int unsignedp,
		      enum optab_methods methods, enum mode_class class,
  rtx temp1, temp2, lab1, lab2;
  enum machine_mode mode;

  optab this_add_optab = add_optab;
  optab this_sub_optab = sub_optab;
  optab this_neg_optab = neg_optab;
  optab this_mul_optab = smul_optab;

  if (binoptab == sdivv_optab)
      this_add_optab = addv_optab;
      this_sub_optab = subv_optab;
      this_neg_optab = negv_optab;
      this_mul_optab = smulv_optab;

  /* Don't fetch these from memory more than once.  */
  real0 = force_reg (submode, real0);
  real1 = force_reg (submode, real1);
    imag0 = force_reg (submode, imag0);
    imag1 = force_reg (submode, imag1);

  /* XXX What's an "unsigned" complex number?  */
  temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
  temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);

  if (temp1 == 0 || temp2 == 0)

  mode = GET_MODE (temp1);
  lab1 = gen_label_rtx ();
  emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
			   mode, unsignedp, lab1);

  /* |c| >= |d|; use ratio d/c to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, imag1, real1,
			  NULL_RTX, unsignedp, methods);
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			   imag1, real1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
			NULL_RTX, unsignedp, methods);
  divisor = expand_binop (submode, this_add_optab, temp1, real1,
			  NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)).  */

      imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
			     NULL_RTX, unsignedp, methods);
      imag_t = expand_unop (submode, this_neg_optab, imag_t,
			    NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
	 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)).  */

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
			    NULL_RTX, unsignedp, methods);
      real_t = expand_binop (submode, this_add_optab, temp1, real0,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
			    NULL_RTX, unsignedp, methods);
      imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);
    emit_move_insn (imagr, res);

  lab2 = gen_label_rtx ();
  emit_jump_insn (gen_jump (lab2));

  /* |d| > |c|; use ratio c/d to scale dividend and divisor.  */

  if (class == MODE_COMPLEX_FLOAT)
    ratio = expand_binop (submode, binoptab, real1, imag1,
			  NULL_RTX, unsignedp, methods);
    ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			   real1, imag1, NULL_RTX, unsignedp);

  /* Calculate divisor.  */

  temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
			NULL_RTX, unsignedp, methods);
  divisor = expand_binop (submode, this_add_optab, temp1, imag1,
			  NULL_RTX, unsignedp, methods);

  /* Calculate dividend.  */

      /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d).  */

      real_t = expand_binop (submode, this_mul_optab, real0, ratio,
			     NULL_RTX, unsignedp, methods);
      imag_t = expand_unop (submode, this_neg_optab, real0,
			    NULL_RTX, unsignedp);

      if (real_t == 0 || imag_t == 0)

      /* Compute (a+ib)/(c+id) as
	 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d).  */

      temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
			    NULL_RTX, unsignedp, methods);
      real_t = expand_binop (submode, this_add_optab, temp1, imag0,
			     NULL_RTX, unsignedp, methods);

      temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
			    NULL_RTX, unsignedp, methods);
      imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
			     NULL_RTX, unsignedp, methods);

      if (real_t == 0 || imag_t == 0)

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, real_t, divisor,
			realr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 real_t, divisor, realr, unsignedp);
    emit_move_insn (realr, res);

  if (class == MODE_COMPLEX_FLOAT)
    res = expand_binop (submode, binoptab, imag_t, divisor,
			imagr, unsignedp, methods);
    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
			 imag_t, divisor, imagr, unsignedp);
    emit_move_insn (imagr, res);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
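/* For example, a caller wanting a plain addition can write

     expand_simple_binop (SImode, PLUS, x, y, NULL_RTX, 0, OPTAB_LIB_WIDEN);

   which maps PLUS to add_optab through code_to_optab and then lets
   expand_binop choose between a direct insn, a widened operation or a
   library call.  */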
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
		  || binoptab->code == ASHIFTRT
		  || binoptab->code == LSHIFTRT
		  || binoptab->code == ROTATE
		  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);
    target = protect_from_queue (target, 1);

  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
      op0 = force_not_mem (op0);
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == 'c'
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
      if (((target == 0 || GET_CODE (target) == REG)
	   ? ((GET_CODE (op1) == REG
	       && GET_CODE (op0) != REG)
	   : rtx_equal_p (op1, target))
	  || GET_CODE (op0) == CONST_INT)
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

	temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
	 if we would swap the operands, we can save the conversions.  */
	  if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
	      && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
	      tmp = op0; op0 = op1; op1 = tmp;
	      tmp = xop0; xop0 = xop1; xop1 = tmp;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode

      /* Now, if insn's predicates don't allow our operands, put them into
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
	  && mode0 != VOIDmode)
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
	  && mode1 != VOIDmode)
	xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
	  /* If PAT is composed of more than one insn, try to add an appropriate
	     REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	     operand, call ourselves again, this time without a target.  */
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
	      delete_insns_since (last);
	      return expand_binop (mode, binoptab, op0, op1, NULL_RTX,

	delete_insns_since (last);
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
	   ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
	  != CODE_FOR_nothing))
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
	  if (GET_MODE_CLASS (mode) == MODE_INT)
	    return gen_lowpart (mode, temp);
	    return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& (((unsignedp ? umul_widen_optab : smul_widen_optab)
		     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
		    != CODE_FOR_nothing)))
	    rtx xop0 = op0, xop1 = op1;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& class == MODE_INT)

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
		if (class != MODE_INT)
		    target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		  return gen_lowpart (mode, temp);
	      delete_insns_since (last);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);

      insns = get_insns ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	  if (binoptab->code != UNKNOWN)
	      = gen_rtx_fmt_ee (binoptab->code, mode,
				copy_rtx (op0), copy_rtx (op1));

	  emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      rtx insns, inter, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */
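      /* Example: for a left shift of a double-word value, bits move from
	 the low-order word into the high-order word, so OUTOF_* is the
	 low-order word and INTO_* is the high-order word; a right shift
	 swaps the two roles.  The XOR with WORDS_BIG_ENDIAN below only
	 turns that choice into the right subword index for the target.  */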
      left_shift = binoptab == ashl_optab;
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count >= BITS_PER_WORD)
	  inter = expand_binop (word_mode, binoptab,
				GEN_INT (shift_count - BITS_PER_WORD),
				into_target, unsignedp, next_methods);

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  /* For a signed right shift, we must fill the word we are shifting
	     out of with copies of the sign bit.  Otherwise it is zeroed.  */
	  if (inter != 0 && binoptab != ashr_optab)
	    inter = CONST0_RTX (word_mode);
	  else if (inter != 0)
	    inter = expand_binop (word_mode, binoptab,
				  GEN_INT (BITS_PER_WORD - 1),
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);

	  optab reverse_unsigned_shift, unsigned_shift;

	  /* For a shift of less than BITS_PER_WORD, to compute the carry,
	     we must do a logical shift in the opposite direction of the
	  reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);

	  /* For a shift of less than BITS_PER_WORD, to compute the word
	     shifted towards, we need to unsigned shift the orig value of
	  unsigned_shift = (left_shift ? ashl_optab : lshr_optab);

	  carries = expand_binop (word_mode, reverse_unsigned_shift,
				  GEN_INT (BITS_PER_WORD - shift_count),
				  0, unsignedp, next_methods);

	  inter = expand_binop (word_mode, unsigned_shift, into_input,
				op1, 0, unsignedp, next_methods);
	  inter = expand_binop (word_mode, ior_optab, carries, inter,
				into_target, unsignedp, next_methods);

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  inter = expand_binop (word_mode, binoptab, outof_input,
				op1, outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
	equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);

	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
	equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      /* We can't make this a no conflict block if this is a word swap,
	 because the word swap case fails if the input and output values
	 are in the same register.  */
      if (shift_count != BITS_PER_WORD)
	emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* These can be done a word at a time by propagating carries.  */
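  /* The idea: combine the low-order words first, recover a carry from that
     partial result with emit_store_flag_force, and fold the carry into the
     next-higher word on the following iteration.  The carry from the main
     add/subtract and the carry from adding the incoming carry are IORed,
     since at most one of the two can be set for any given word.  */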
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || GET_CODE (target) != REG)

      /* Indicate for flow that the entire target reg is being set.  */
      if (GET_CODE (target) == REG)
	emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);

	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						 word_mode, 1, normalizep);

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   NULL_RTX, 1, next_methods);

		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);

	    emit_move_insn (target_piece, newx);

	  carry_in = carry_out;

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	  if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	      rtx temp = emit_move_insn (target, xtarget);

	      set_unique_reg_note (temp,
				   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);
  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	*		      [__op1_high_|__op1_low__]
	________________________________________________
			       _______________________
     (1)		      [__op0_low__*__op1_low__]
			   _______________________
     (2a)		  [__op0_low__*__op1_high_]
			   _______________________
     (2b)		  [__op0_high_*__op1_low__]
		       _______________________
     (3)	      [__op0_high_*__op1_high_]

    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

    If both operands are positive then no adjustment is needed.

    If the operands have different signs, for example op0_low < 0 and
    op1_low >= 0, the instruction treats the most significant bit of
    op0_low as a sign bit instead of a bit with significance
    2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
    with 2**BITS_PER_WORD - op0_low, and two's complements the
    result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
    the result.

    Similarly, if both operands are negative, we need to add
    (op0_low + op1_low) * 2**BITS_PER_WORD.

    We use a trick to adjust quickly.  We logically shift op0_low right
    (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
    op0_high (op1_high) before it is used to calculate 2b (2a).  If no
    logical shift exists, we do an arithmetic right shift and subtract
    the 0 or -1.  */
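  /* A tiny worked instance of the adjustment, using BITS_PER_WORD == 4 for
     brevity: take op0_low = 0b1010 and op1_low = 0b0011.  Read unsigned
     these are 10 and 3, so the desired widening product is 30.  A signed
     widening multiply computes -6 * 3 = -18; adding op1_low * 2**4 = 48
     gives 30 again.  With both operands negative the same bookkeeping adds
     (op0_low + op1_low) * 2**4, and the result is correct modulo 2**8,
     which is all that matters since only the low two words are kept.  */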
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
	   != CODE_FOR_nothing)
	  || (smul_widen_optab->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing)))
      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);

      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
	 prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
	  || (target != 0 && GET_CODE (target) != REG))

      /* Multiply the two lower words to get a double-word product.
	 If unsigned widening multiplication is available, use that;
	 otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	  product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
				  target, 1, OPTAB_DIRECT);

	  /* If we didn't succeed, delete everything we did so far.  */
	    delete_insns_since (last);
	    op0_xhigh = op0_high, op1_xhigh = op1_high;

	       && smul_widen_optab->handlers[(int) mode].insn_code
		  != CODE_FOR_nothing)
	  rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
	  product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
				  target, 1, OPTAB_DIRECT);
	  op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
				    NULL_RTX, 1, next_methods);
	    op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
				      op0_xhigh, op0_xhigh, 0, next_methods);
	      op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
					NULL_RTX, 0, next_methods);
		op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
					  op0_xhigh, op0_xhigh, 0,

	  op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
				    NULL_RTX, 1, next_methods);
	    op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
				      op1_xhigh, op1_xhigh, 0, next_methods);
	      op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
					NULL_RTX, 0, next_methods);
		op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
					  op1_xhigh, op1_xhigh, 0,

      /* If we have been able to directly compute the product of the
	 low-order words of the operands and perform any required adjustments
	 of the operands, we proceed by trying two more multiplications
	 and then computing the appropriate sum.

	 We have checked above that the required addition is provided.
	 Full-word addition will normally always succeed, especially if
	 it is provided at all, so we don't worry about its failure.  The
	 multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)
	  rtx product_high = operand_subword (product, high, 1, mode);
	  rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
				   NULL_RTX, 0, OPTAB_DIRECT);

	  if (!REG_P (product_high))
	    product_high = force_reg (word_mode, product_high);

	    temp = expand_binop (word_mode, add_optab, temp, product_high,
				 product_high, 0, next_methods);

	  if (temp != 0 && temp != product_high)
	    emit_move_insn (product_high, temp);

	    temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
				 NULL_RTX, 0, OPTAB_DIRECT);

	    temp = expand_binop (word_mode, add_optab, temp,
				 product_high, product_high,

	  if (temp != 0 && temp != product_high)
	    emit_move_insn (product_high, temp);

	  emit_move_insn (operand_subword (product, high, 1, mode), product_high);

	      if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
		  temp = emit_move_insn (product, product);
		  set_unique_reg_note (temp,
				       gen_rtx_fmt_ee (MULT, mode,

      /* If we get here, we couldn't do it for some reason even though we
	 originally thought we could.  Delete anything we've emitted in
      delete_insns_since (last);
  /* Open-code the vector operations if we have no hardware support
  if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
    return expand_vector_binop (mode, binoptab, op0, op1, target,
				unsignedp, methods);
  /* We need to open-code the complex type operations: '+, -, * and /' */

  /* At this point we allow operations between two similar complex
     numbers, and also if one of the operands is not a complex number
     but rather of MODE_FLOAT or MODE_INT.  However, the caller
     must make sure that the MODE of the non-complex operand matches
     the SUBMODE of the complex operand.  */

  if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
      rtx real0 = 0, imag0 = 0;
      rtx real1 = 0, imag1 = 0;
      rtx realr, imagr, res;

      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode = GET_MODE_INNER (mode);

      if (submode == BLKmode)

      if (GET_MODE (op0) == mode)
	  real0 = gen_realpart (submode, op0);
	  imag0 = gen_imagpart (submode, op0);

      if (GET_MODE (op1) == mode)
	  real1 = gen_realpart (submode, op1);
	  imag1 = gen_imagpart (submode, op1);

      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))

      result = gen_reg_rtx (mode);
      realr = gen_realpart (submode, result);
      imagr = gen_imagpart (submode, result);

      switch (binoptab->code)
	  /* (a+ib) + (c+id) = (a+c) + i(b+d) */
	  /* (a+ib) - (c+id) = (a-c) + i(b-d) */
	  res = expand_binop (submode, binoptab, real0, real1,
			      realr, unsignedp, methods);
	  else if (res != realr)
	    emit_move_insn (realr, res);

	  if (imag0 != 0 && imag1 != 0)
	    res = expand_binop (submode, binoptab, imag0, imag1,
				imagr, unsignedp, methods);
	  else if (imag0 != 0)
	  else if (binoptab->code == MINUS)
	    res = expand_unop (submode,
			       binoptab == subv_optab ? negv_optab : neg_optab,
			       imag1, imagr, unsignedp);
	  else if (res != imagr)
	    emit_move_insn (imagr, res);

	  /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */

	  if (imag0 != 0 && imag1 != 0)
	      /* Don't fetch these from memory more than once.  */
	      real0 = force_reg (submode, real0);
	      real1 = force_reg (submode, real1);
	      imag0 = force_reg (submode, imag0);
	      imag1 = force_reg (submode, imag1);

	      temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
				    unsignedp, methods);
	      temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
				    unsignedp, methods);

	      if (temp1 == 0 || temp2 == 0)
				     binoptab == smulv_optab ? subv_optab : sub_optab,
				     temp1, temp2, realr, unsignedp, methods));
	      else if (res != realr)
		emit_move_insn (realr, res);

	      temp1 = expand_binop (submode, binoptab, real0, imag1,
				    NULL_RTX, unsignedp, methods);

	      /* Avoid expanding redundant multiplication for the common
		 case of squaring a complex number.  */
	      if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1))
		temp2 = expand_binop (submode, binoptab, real1, imag0,
				      NULL_RTX, unsignedp, methods);

	      if (temp1 == 0 || temp2 == 0)
				     binoptab == smulv_optab ? addv_optab : add_optab,
				     temp1, temp2, imagr, unsignedp, methods));
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	      /* Don't fetch these from memory more than once.  */
	      real0 = force_reg (submode, real0);
	      real1 = force_reg (submode, real1);

	      res = expand_binop (submode, binoptab, real0, real1,
				  realr, unsignedp, methods);
	      else if (res != realr)
		emit_move_insn (realr, res);

		res = expand_binop (submode, binoptab,
				    real1, imag0, imagr, unsignedp, methods);
		res = expand_binop (submode, binoptab,
				    real0, imag1, imagr, unsignedp, methods);
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	  /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
	      /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
	      /* Don't fetch these from memory more than once.  */
	      real1 = force_reg (submode, real1);

	      /* Simply divide the real and imaginary parts by `c' */
	      if (class == MODE_COMPLEX_FLOAT)
		res = expand_binop (submode, binoptab, real0, real1,
				    realr, unsignedp, methods);
		res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
				     real0, real1, realr, unsignedp);
	      else if (res != realr)
		emit_move_insn (realr, res);

	      if (class == MODE_COMPLEX_FLOAT)
		res = expand_binop (submode, binoptab, imag0, real1,
				    imagr, unsignedp, methods);
		res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
				     imag0, real1, imagr, unsignedp);
	      else if (res != imagr)
		emit_move_insn (imagr, res);

	      switch (flag_complex_divide_method)
		  ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
						 realr, imagr, submode,
		  ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
					     realr, imagr, submode,

	  rtx equiv = gen_rtx_fmt_ee (binoptab->code, mode,
				      copy_rtx (op0), copy_rtx (op1));
	  emit_no_conflict_block (seq, result, op0, op1, equiv);
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
      enum machine_mode op1_mode = mode;

	  op1_mode = word_mode;
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (word_mode, op1, 1);

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	  if ((binoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && binoptab->handlers[(int) wider_mode].libfunc))
	      rtx xop0 = op0, xop1 = op1;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
		  if (class != MODE_INT)
		      target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		    return gen_lowpart (mode, temp);
		delete_insns_since (last);

  delete_insns_since (entry_last);
/* Like expand_binop, but for open-coding vectors binops.  */

expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;
  rtx t, a, b, res, seq;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
  for (tmode = GET_CLASS_NARROWEST_MODE (class);
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))
      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
	  && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

      switch (binoptab->code)
	  tmode = int_mode_for_mode (mode);
	  if (tmode != BLKmode)

  subsize = GET_MODE_SIZE (submode);
  subbitsize = GET_MODE_BITSIZE (submode);
  elts = size / subsize;

  /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
     but that we operate on more than one element at a time.  */
  if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)

  /* Errors can leave us with a const0_rtx as operand.  */
  if (GET_MODE (op0) != mode)
    op0 = copy_to_mode_reg (mode, op0);
  if (GET_MODE (op1) != mode)
    op1 = copy_to_mode_reg (mode, op1);

    target = gen_reg_rtx (mode);

  for (i = 0; i < elts; ++i)
      /* If this is part of a register, and not the first item in the
	 word, we can't store using a SUBREG - that would clobber
	 And storing with a SUBREG is only possible for the least
	 significant part, hence we can't do it for big endian
	 (unless we want to permute the evaluation order.  */
      if (GET_CODE (target) == REG
	  && (BYTES_BIG_ENDIAN
	      ? subsize < UNITS_PER_WORD
	      : ((i * subsize) % UNITS_PER_WORD) != 0))

	t = simplify_gen_subreg (submode, target, mode, i * subsize);
      if (CONSTANT_P (op0))
	a = simplify_gen_subreg (submode, op0, mode, i * subsize);
	a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
			       NULL_RTX, submode, submode, size);
      if (CONSTANT_P (op1))
	b = simplify_gen_subreg (submode, op1, mode, i * subsize);
	b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
			       NULL_RTX, submode, submode, size);

      if (binoptab->code == DIV)
	  if (class == MODE_VECTOR_FLOAT)
	    res = expand_binop (submode, binoptab, a, b, t,
				unsignedp, methods);
	    res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
				 a, b, t, unsignedp);
	res = expand_binop (submode, binoptab, a, b, t,
			    unsignedp, methods);

	emit_move_insn (t, res);
	store_bit_field (target, subbitsize, i * subbitsize, submode, res,
/* Like expand_unop but for open-coding vector unops.  */

expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0,
		    rtx target, int unsignedp)
  enum machine_mode submode, tmode;
  int size, elts, subsize, subbitsize, i;

  size = GET_MODE_SIZE (mode);
  submode = GET_MODE_INNER (mode);

  /* Search for the widest vector mode with the same inner mode that is
     still narrower than MODE and that allows to open-code this operator.
     Note, if we find such a mode and the handler later decides it can't
     do the expansion, we'll be called recursively with the narrower mode.  */
  for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
       GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
       tmode = GET_MODE_WIDER_MODE (tmode))
      if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
	  && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)

  /* If there is no negate operation, try doing a subtract from zero.  */
  if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
      /* Avoid infinite recursion when an
	 error has left us with the wrong mode.  */
      && GET_MODE (op0) == mode)
      temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_DIRECT);

  if (unoptab == one_cmpl_optab)
      tmode = int_mode_for_mode (mode);
      if (tmode != BLKmode)

  subsize = GET_MODE_SIZE (submode);
  subbitsize = GET_MODE_BITSIZE (submode);
  elts = size / subsize;

  /* Errors can leave us with a const0_rtx as operand.  */
  if (GET_MODE (op0) != mode)
    op0 = copy_to_mode_reg (mode, op0);

    target = gen_reg_rtx (mode);

  for (i = 0; i < elts; ++i)
      /* If this is part of a register, and not the first item in the
	 word, we can't store using a SUBREG - that would clobber
	 And storing with a SUBREG is only possible for the least
	 significant part, hence we can't do it for big endian
	 (unless we want to permute the evaluation order.  */
      if (GET_CODE (target) == REG
	  && (BYTES_BIG_ENDIAN
	      ? subsize < UNITS_PER_WORD
	      : ((i * subsize) % UNITS_PER_WORD) != 0))

	t = simplify_gen_subreg (submode, target, mode, i * subsize);
      if (CONSTANT_P (op0))
	a = simplify_gen_subreg (submode, op0, mode, i * subsize);
	a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
			       t, submode, submode, size);

      res = expand_unop (submode, unoptab, a, t, unsignedp);

	emit_move_insn (t, res);
	store_bit_field (target, subbitsize, i * subbitsize, submode, res,
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
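/* For example, an unsigned HImode addition widened to SImode can safely use
   the signed SImode add insn: the low 16 bits of a sum depend only on the
   low 16 bits of the operands, so truncating the signed wider result gives
   the same value as the unsigned wider operation would.  */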
2098 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
2099 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2100 enum optab_methods methods
)
2103 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2104 struct optab wide_soptab
;
2106 /* Do it without widening, if possible. */
2107 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2108 unsignedp
, OPTAB_DIRECT
);
2109 if (temp
|| methods
== OPTAB_DIRECT
)
2112 /* Try widening to a signed int. Make a fake signed optab that
2113 hides any signed insn for direct use. */
2114 wide_soptab
= *soptab
;
2115 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
2116 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
2118 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2119 unsignedp
, OPTAB_WIDEN
);
2121 /* For unsigned operands, try widening to an unsigned int. */
2122 if (temp
== 0 && unsignedp
)
2123 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2124 unsignedp
, OPTAB_WIDEN
);
2125 if (temp
|| methods
== OPTAB_WIDEN
)
2128 /* Use the right width lib call if that exists. */
2129 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
2130 if (temp
|| methods
== OPTAB_LIB
)
2133 /* Must widen and use a lib call, use either signed or unsigned. */
2134 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
2135 unsignedp
, methods
);
2139 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
2140 unsignedp
, methods
);
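/* Illustrative sketch, not part of GCC: why a signed wider operation is
   acceptable when widening unsigned operands.  The low-order bits of the
   result do not depend on the signedness of the wider operation.  The
   function name and fixed widths are hypothetical.  */
#if 0
static unsigned char
add_qi_via_si_sketch (unsigned char a, unsigned char b)
{
  /* Zero-extend to a wider (signed) type, operate there, truncate back.
     E.g. a = 200, b = 100: wide result is 300; truncated to QImode it is
     44, exactly what an unsigned QImode add would produce.  */
  int wide = (int) a + (int) b;
  return (unsigned char) wide;
}
#endif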
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (op0) && preserve_subexpressions_p ()
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && preserve_subexpressions_p ()
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

    targ0 = protect_from_queue (targ0, 1);
    targ0 = gen_reg_rtx (mode);
    targ1 = protect_from_queue (targ1, 1);
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                delete_insns_since (last);

  delete_insns_since (entry_last);
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
  optab unop = code_to_optab[(int) code];

  return expand_unop (mode, unop, op0, target, unsignedp);
/* Try calculating (clz:narrow x) as
   (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */

widen_clz (enum machine_mode mode, rtx op0, rtx target)
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
              rtx xop0, temp, last;

              last = get_last_insn ();

                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              delete_insns_since (last);
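/* Illustrative sketch, not part of GCC: the identity used by widen_clz,
   shown with concrete widths.  __builtin_clz and the fixed 8/32-bit widths
   are assumptions made only for the sake of the example.  */
#if 0
static int
clz8_via_clz32_sketch (unsigned char x)
{
  /* For nonzero X, (clz:QI x) == (clz:SI (zero_extend:SI x)) - (32 - 8):
     the zero-extension contributes exactly 24 extra leading zero bits,
     which the subtraction removes again.  */
  return __builtin_clz ((unsigned int) x) - (32 - 8);
}
#endif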
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

expand_parity (enum machine_mode mode, rtx op0, rtx target)
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
              rtx xop0, temp, last;

              last = get_last_insn ();

                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
              temp = expand_binop (wider_mode, and_optab, temp, GEN_INT (1),
                                   target, true, OPTAB_DIRECT);
              delete_insns_since (last);
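/* Illustrative sketch, not part of GCC: parity as the low bit of popcount,
   matching the (and (popcount x) 1) expansion above.  __builtin_popcount is
   assumed only for the example.  */
#if 0
static int
parity_sketch (unsigned int x)
{
  /* E.g. x = 0xB (binary 1011) has popcount 3, so its parity is 3 & 1 == 1.  */
  return __builtin_popcount (x) & 1;
}
#endif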
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = protect_from_queue (op0, 0);

    op0 = force_not_mem (op0);

    target = protect_from_queue (target, 1);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
          delete_insns_since (last);
          return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);

        delete_insns_since (last);

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
      temp = widen_clz (mode, op0, target);

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,

              if (class != MODE_INT)
                  target = gen_reg_rtx (mode);
                  convert_move (target, temp, 0);
                return gen_lowpart (mode, temp);
          delete_insns_since (last);

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,

  /* Open-code the complex negation operation.  */
  else if (unoptab->code == NEG
           && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
      /* Find the correct mode for the real and imaginary parts.  */
      enum machine_mode submode = GET_MODE_INNER (mode);

      if (submode == BLKmode)

        target = gen_reg_rtx (mode);

      target_piece = gen_imagpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_imagpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);

      target_piece = gen_realpart (submode, target);
      x = expand_unop (submode, unoptab,
                       gen_realpart (submode, op0),
                       target_piece, unsignedp);
      if (target_piece != x)
        emit_move_insn (target_piece, x);

      emit_no_conflict_block (seq, target, op0, 0,
                              gen_rtx_fmt_e (unoptab->code, mode,

  /* Try negating floating point values by flipping the sign bit.  */
  if (unoptab->code == NEG && class == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
      const struct real_format *fmt = REAL_MODE_FORMAT (mode);
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
          HOST_WIDE_INT hi, lo;
          rtx last = get_last_insn ();

          /* Handle targets with different FP word orders.  */
          if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
              int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
              int word = nwords - (bitpos / BITS_PER_WORD) - 1;
              bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;

          if (bitpos < HOST_BITS_PER_WIDE_INT)
              lo = (HOST_WIDE_INT) 1 << bitpos;
              hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
          temp = expand_binop (imode, xor_optab,
                               gen_lowpart (imode, op0),
                               immed_double_const (lo, hi, imode),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);

              target = gen_reg_rtx (mode);
              insn = emit_move_insn (target, gen_lowpart (mode, temp));
              set_unique_reg_note (insn, REG_EQUAL,
                                   gen_rtx_fmt_e (NEG, mode,

          delete_insns_since (last);

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
      temp = expand_parity (mode, op0, target);

  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == popcount_optab || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, outmode,
      insns = get_insns ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, mode, op0));

  if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
    return expand_vector_unop (mode, unoptab, op0, target, unsignedp);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)
              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,

              /* If we are generating clz using wider mode, adjust the
                 result.  */
              if (unoptab == clz_optab && temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);

                if (class != MODE_INT)
                    target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                  return gen_lowpart (mode, temp);
            delete_insns_since (last);

  /* If there is no negate operation, try doing a subtract from zero.
     The US Software GOFAST library needs this.  */
  if (unoptab->code == NEG)
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
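/* Illustrative sketch, not part of GCC: negating an IEEE single-precision
   value by XORing its sign bit in an integer view, as expand_unop above does
   with xor_optab when the format has a sign bit and signed zeros.  Assumes
   32-bit float and unsigned int; the function name is hypothetical.  */
#if 0
#include <string.h>

static float
neg_float_by_signbit_sketch (float x)
{
  unsigned int bits;

  memcpy (&bits, &x, sizeof bits);   /* like gen_lowpart (imode, op0) */
  bits ^= 0x80000000u;               /* flip the sign bit */
  memcpy (&x, &bits, sizeof bits);
  return x;                          /* also turns +0.0 into -0.0 */
}
#endif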
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,

  /* For floating point modes, try clearing the sign bit.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
      const struct real_format *fmt = REAL_MODE_FORMAT (mode);
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0)
          HOST_WIDE_INT hi, lo;
          rtx last = get_last_insn ();

          /* Handle targets with different FP word orders.  */
          if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
              int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
              int word = nwords - (bitpos / BITS_PER_WORD) - 1;
              bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;

          if (bitpos < HOST_BITS_PER_WIDE_INT)
              lo = (HOST_WIDE_INT) 1 << bitpos;
              hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
          temp = expand_binop (imode, and_optab,
                               gen_lowpart (imode, op0),
                               immed_double_const (~lo, ~hi, imode),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);

              target = gen_reg_rtx (mode);
              insn = emit_move_insn (target, gen_lowpart (mode, temp));
              set_unique_reg_note (insn, REG_EQUAL,
                                   gen_rtx_fmt_e (ABS, mode,

          delete_insns_since (last);

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,

      delete_insns_since (last);

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
      temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                           temp, extended, target, 0, OPTAB_LIB_WIDEN);
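/* Illustrative sketch, not part of GCC: the branch-free absolute value used
   above when jumps are expensive, written for a 32-bit int.  Assumes an
   arithmetic right shift of negative values; the function name is
   hypothetical.  */
#if 0
static int
abs_nojump_sketch (int x)
{
  /* extended is all ones when x < 0, all zeros otherwise, so
     ((x >> (W-1)) ^ x) - (x >> (W-1)) == |x|.
     E.g. x = -5: extended = -1, (x ^ -1) = 4, 4 - (-1) = 5.  */
  int extended = x >> 31;
  return (x ^ extended) - extended;
}
#endif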
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same as the source,
     provided it is also a pseudo register.  */
  if (op0 == target && GET_CODE (op0) == REG
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
      || (GET_CODE (target) == REG
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

    emit_move_insn (target, op0);

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                             NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
  emit_move_insn (target, op0);
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   UNSIGNEDP is relevant for complex integer modes.  */

expand_complex_abs (enum machine_mode mode, rtx op0, rtx target,
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  optab this_abs_optab;

  /* Find the correct mode for the real and imaginary parts.  */
  enum machine_mode submode = GET_MODE_INNER (mode);

  if (submode == BLKmode)

  op0 = protect_from_queue (op0, 0);

    op0 = force_not_mem (op0);

  last = get_last_insn ();

    target = protect_from_queue (target, 1);

  this_abs_optab = ! unsignedp && flag_trapv
                   && (GET_MODE_CLASS (mode) == MODE_INT)
                   ? absv_optab : abs_optab;

  if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

        temp = gen_reg_rtx (submode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
        temp = gen_reg_rtx (submode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
          delete_insns_since (last);
          return expand_unop (mode, this_abs_optab, op0, NULL_RTX,

        delete_insns_since (last);

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      if (this_abs_optab->handlers[(int) wider_mode].insn_code
          != CODE_FOR_nothing)
          xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
          temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

              if (class != MODE_COMPLEX_INT)
                  target = gen_reg_rtx (submode);
                  convert_move (target, temp, 0);
                return gen_lowpart (submode, temp);
            delete_insns_since (last);

  /* Open-code the complex absolute-value operation
     if we can open-code sqrt.  Otherwise it's not worth while.  */
  if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing
      rtx real, imag, total;

      real = gen_realpart (submode, op0);
      imag = gen_imagpart (submode, op0);

      /* Square both parts.  */
      real = expand_mult (submode, real, real, NULL_RTX, 0);
      imag = expand_mult (submode, imag, imag, NULL_RTX, 0);

      /* Sum the parts.  */
      total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
                            0, OPTAB_LIB_WIDEN);

      /* Get sqrt in TARGET.  Set TARGET to where the result is.  */
      target = expand_unop (submode, sqrt_optab, total, target, 0);

        delete_insns_since (last);

  /* Now try a library call in this mode.  */
  if (this_abs_optab->handlers[(int) mode].libfunc)

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, submode, 1, op0, mode);
      insns = get_insns ();

      target = gen_reg_rtx (submode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (this_abs_optab->code, mode, op0));

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      if ((this_abs_optab->handlers[(int) wider_mode].insn_code
           != CODE_FOR_nothing)
          || this_abs_optab->handlers[(int) wider_mode].libfunc)
          xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
          temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);

              if (class != MODE_COMPLEX_INT)
                  target = gen_reg_rtx (submode);
                  convert_move (target, temp, 0);
                return gen_lowpart (submode, temp);
            delete_insns_since (last);

  delete_insns_since (entry_last);
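/* Illustrative sketch, not part of GCC: the open-coded complex absolute
   value used above when sqrt can be open-coded, |a+bi| = sqrt (a*a + b*b).
   The real library routines guard against intermediate overflow; this
   sketch deliberately does not.  */
#if 0
#include <math.h>

static double
complex_abs_sketch (double re, double im)
{
  /* Square both parts, sum them, take the square root.  */
  return sqrt (re * re + im * im);
}
#endif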
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  temp = target = protect_from_queue (target, 1);

  op0 = protect_from_queue (op0, 0);

  /* Sign and zero extension from memory is often done specially on
     RISC machines, so forcing into a register here can pessimize
     the code.  */
  if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
    op0 = force_not_mem (op0);

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
      || (flag_force_mem && GET_CODE (temp) == MEM))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

    emit_move_insn (target, temp);
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
  rtx prev, next, first, last, insn;

  if (GET_CODE (target) != REG || reload_in_progress)
    return emit_insn (insns);

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) != INSN
        || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
      return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
          || GET_CODE (PATTERN (insn)) == CLOBBER)
        set = PATTERN (insn);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
          for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
            if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
                set = XVECEXP (PATTERN (insn), 0, i);

      if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;

            PREV_INSN (next) = PREV_INSN (insn);

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
      next = NEXT_INSN (insn);

      if (op1 && GET_CODE (op1) == REG)
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,

      if (op0 && GET_CODE (op0) == REG)
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
      last = emit_move_insn (target, target);
        set_unique_reg_note (last, REG_EQUAL, equiv);

      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         alleged libcall value when found together with the REG_RETVAL
         note added below.  An existing note can come from an insn
         expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));

    first = get_insns ();
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (GET_CODE (insn) == CALL_INSN)
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
              remove_note (insn, note);

    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == CALL_INSN)
          rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            XEXP (note, 0) = GEN_INT (-1);
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1),

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
      rtx set = single_set (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && GET_CODE (SET_DEST (set)) == REG
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
              || ((! INSN_P (insns)
                   || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
                  && ! reg_used_between_p (SET_DEST (set), insns, insn)
                  && ! modified_in_p (SET_SRC (set), insns)
                  && ! modified_between_p (SET_SRC (set), insns, insn))))
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;

            PREV_INSN (next) = PREV_INSN (insn);

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (GET_CODE (insn) == CODE_LABEL)

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
      next = NEXT_INSN (insn);

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         libcall value when found together with the REG_RETVAL note added
         below.  An existing note can come from an insn expansion at
         "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));

  if (final_dest != target)
    emit_move_insn (final_dest, target);

    first = get_insns ();
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes
         when the encapsulated region would not be in one basic block,
         i.e. when there is a control_flow_insn_p insn between FIRST and LAST.  */
      bool attach_libcall_retval_notes = true;
      next = NEXT_INSN (last);
      for (insn = first; insn != next; insn = NEXT_INSN (insn))
        if (control_flow_insn_p (insn))
            attach_libcall_retval_notes = false;

      if (attach_libcall_retval_notes)
          REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
          REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
/* Generate code to store zero in X.  */

emit_clr_insn (rtx x)
  emit_move_insn (x, const0_rtx);

/* Generate code to store 1 in X
   assuming it contains zero beforehand.  */

emit_0_to_1_insn (rtx x)
  emit_move_insn (x, const1_rtx);
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          /* There's only one cmov entry point, and it's allowed to fail.  */

      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      mode = GET_MODE_WIDER_MODE (mode);
  while (mode != VOIDmode);
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */

prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* They could both be VOIDmode if both args are immediate constants,
     but we should fold that at an earlier stage.
     With no special code here, this will call abort,
     reminding the programmer to implement such folding.  */

  if (mode != BLKmode && flag_force_mem)
      /* Load duplicate non-volatile operands once.  */
      if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
          x = force_not_mem (x);
          x = force_not_mem (x);
          y = force_not_mem (y);

  /* If we are inside an appropriately-short loop and one operand is an
     expensive constant, force it into a register.  */
  if (CONSTANT_P (x) && preserve_subexpressions_p ()
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && preserve_subexpressions_p ()
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Abort if we have a non-canonical comparison.  The RTL documentation
     states that canonical comparisons are required only for targets which
     have cc0.  */
  if (CONSTANT_P (x) && ! CONSTANT_P (y))

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      x = protect_from_queue (x, 0);
      y = protect_from_queue (y, 0);
      size = protect_from_queue (size, 0);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
          cmp_code = cmpmem_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstr_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)

          /* Must make sure the size fits the insn's mode.  */
          if ((GET_CODE (size) == CONST_INT
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *pmode = result_mode;

      /* Otherwise call a library function, memcmp if we've got it,
#ifdef TARGET_MEM_FUNCTIONS
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      libfunc = bcmp_libfunc;
      length_type = integer_type_node;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TREE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,

      *pmode = result_mode;

  if (can_compare_p (*pcomparison, mode, purpose))

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
        libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
                                        word_mode, 2, x, mode, y, mode);

      /* Integer comparison returns a result that must be compared against 1,
         so that even if we do an unsigned compare afterward,
         there is still a value that can represent the result "less than".  */

  if (class == MODE_FLOAT)
    prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
  x = protect_from_queue (x, 0);

  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
        (x, insn_data[icode].operand[opnum].mode))
      x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

      if (icode != CODE_FOR_nothing
          && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
          x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
          emit_jump_insn (GEN_FCN (icode) (test, x, y, label));

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
            emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
            emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));

      if (class != MODE_INT && class != MODE_FLOAT
          && class != MODE_COMPLEX_FLOAT)

        wider_mode = GET_MODE_WIDER_MODE (wider_mode);
  while (wider_mode != VOIDmode);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
      /* If we're not emitting a branch, this means some caller
      comparison = swap_condition (comparison);

  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical
     RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  rtx x = protect_from_queue (*px, 0);
  rtx y = protect_from_queue (*py, 0);
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;

  for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
          tmp = x; x = y; y = tmp;
          comparison = swapped;

  if (mode == VOIDmode)

  if (mode != orig_mode)
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                    temp, const_true_rtx, equiv);

      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
          rtx true_rtx, false_rtx;

              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;

              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;

              true_rtx = const1_rtx;
              false_rtx = const0_rtx;

              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;

              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;

              true_rtx = const0_rtx;
              false_rtx = const1_rtx;

          equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                        equiv, true_rtx, false_rtx);

  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))

  *pcomparison = comparison;
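/* Illustrative sketch, not part of GCC: the equivalence attached above for
   UNORDERED -- two values are unordered exactly when at least one of them is
   a NaN, and a NaN compares unequal to itself.  */
#if 0
static int
unordered_sketch (double x, double y)
{
  /* (ne x x) "ior" (ne y y), the same shape as the REG_EQUAL note built
     with simplify_gen_relational / simplify_gen_ternary above.  */
  return x != x || y != y;
}
#endif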
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

emit_indirect_jump (rtx loc)
  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
      code = swap_condition (code);

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)

      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);

    target = protect_from_queue (target, 1);
    target = gen_reg_rtx (mode);

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
        (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
        (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
        (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
  if (GET_CODE (comparison) != code)

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  if (subtarget != target)
    convert_move (target, subtarget, 0);

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

can_conditionally_move_p (enum machine_mode mode)
  if (movcc_gen_code[mode] != CODE_FOR_nothing)

#endif /* HAVE_conditional_move */
4117 /* Emit a conditional addition instruction if the machine supports one for that
4118 condition and machine mode.
4120 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4121 the mode to use should they be constants. If it is VOIDmode, they cannot
4124 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4125 should be stored there. MODE is the mode to use should they be constants.
4126 If it is VOIDmode, they cannot both be constants.
4128 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4129 is not supported. */
rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (flag_force_mem)
    {
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);
    }

  if (target)
    target = protect_from_queue (target, 1);
  else
    target = gen_reg_rtx (mode);

  subtarget = target;

  emit_queue ();

  op2 = protect_from_queue (op2, 0);
  op3 = protect_from_queue (op3, 0);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
        (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
        (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
        (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
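/* Illustrative sketch (not part of the original sources): a caller that
   wants  result = (x >= 0 ? y : y + 1)  in SImode could try

     rtx result = emit_conditional_add (NULL_RTX, GE, x, const0_rtx, SImode,
                                        y, const1_rtx, SImode, 0);

   where x and y are hypothetical SImode rtxes, and fall back to an explicit
   compare-and-branch sequence whenever NULL_RTX comes back.  */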
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.

   They do not protect from queued increments,
   because they may be used 1) in protect_from_queue itself
   and 2) in other passes where there is no queue.  */
/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
/* Return nonzero if adding Y to X can be done with a single insn.  */

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
            (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
/* Return nonzero if subtracting Y from X can be done with a single insn.  */

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
         (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
            (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
            (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
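/* Illustrative sketch (not part of the original sources): callers are
   expected to check the have_* predicate before asking for an insn body,
   e.g.

     if (have_add2_insn (x, GEN_INT (4)))
       emit_insn (gen_add2_insn (x, GEN_INT (4)));

   since gen_add2_insn aborts when its operands do not satisfy the
   predicates of the target's add pattern, while gen_add3_insn and
   gen_sub3_insn merely return NULL_RTX in that case.  */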
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;

#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
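/* Illustrative sketch (not part of the original sources): to widen a QImode
   value into SImode with zero-extension one could write

     if (can_extend_p (SImode, QImode, 1) != CODE_FOR_nothing)
       emit_insn (gen_extend_insn (si_reg, qi_reg, SImode, QImode, 1));

   where si_reg and qi_reg are hypothetical registers of the two modes.  */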
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)
    abort ();

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
          icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      emit_queue ();

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (flag_force_mem)
        from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (GET_CODE (target) != REG
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      if (flag_force_mem)
        from = force_not_mem (from);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      if (!libfunc)
        abort ();

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_FLOAT (GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
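/* Illustration (not part of the original sources) of the unsigned fallback
   above: for a DImode source and an SFmode destination no float mode holds
   all 64 bits, so the code halves a negative-looking input while keeping the
   shifted-out bit sticky, from' = (from & 1) | (from >> 1), converts from'
   as a signed value, and doubles the float result.  The sticky bit only
   influences the final rounding, so the value is rounded once, just as the
   full-width input would have been.  */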
/* expand_fix: generate code to convert FROM to fixed point
   and store in TO.  FROM must be floating point.  */

static rtx
ftruncify (rtx x)
{
  rtx temp = gen_reg_rtx (GET_MODE (x));
  return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
}
void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            to = protect_from_queue (to, 1);
            from = protect_from_queue (from, 0);

            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              from = ftruncify (from);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is not needed.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code deals with inputs smaller than 2^63
     and does just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (for any other input, overflow happens and the result is
     undefined).  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          emit_queue ();
          to = protect_from_queue (to, 1);
          from = protect_from_queue (from, 0);

          if (flag_force_mem)
            from = force_not_mem (from);

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      if (!libfunc)
        abort ();

      to = protect_from_queue (to, 1);
      from = protect_from_queue (from, 0);

      if (flag_force_mem)
        from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
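/* Worked illustration (not part of the original sources) of the unsigned
   branch above: fixing a DFmode value X to a 64-bit unsigned integer first
   compares X against LIMIT = 2^63.  If X < 2^63 the plain signed fix is
   used.  Otherwise X - 2^63 is computed (exact, since it merely clears the
   top bit of the significand), fixed as a signed value, and the missing
   2^63 is restored by XORing the result with (HOST_WIDE_INT) 1 << 63,
   which sets the sign bit without any risk of signed overflow.  */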
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
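/* Illustrative use (not part of the original sources): a pass that wants to
   know whether SImode negation can be open-coded on this target can simply
   test  have_insn_for (NEG, SImode)  before committing to rtl that relies
   on it.  */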
/* Create a blank optab.  */
static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

/* Likewise, but for a conversion optab.  */
static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */
static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */
static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */
static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
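/* Example of the naming scheme (illustration only): a call such as
   init_libfuncs (add_optab, SFmode, DFmode, "add", '3') would fill in
   entries like "__addsf3" and "__adddf3", i.e. "__", the operation name,
   the lowercased mode name, and the operand-count suffix, which matches
   the corresponding libgcc routine names.  */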
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class from_class,
                               enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
         tmode = GET_MODE_WIDER_MODE (tmode))
      {
        fname = GET_MODE_NAME (fmode);
        tname = GET_MODE_NAME (tmode);

        p = suffix;
        for (q = fname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = tname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p = '\0';

        tab->handlers[tmode][fmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
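/* Example (illustration only): the call
   init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT)
   made from init_optabs below yields names such as "__floatsisf" and
   "__floatdidf": "__", the operation name, then the source and destination
   mode names, with no operand-count suffix.  */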
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */

static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
         wmode = GET_MODE_WIDER_MODE (wmode))
      {
        nname = GET_MODE_NAME (nmode);
        wname = GET_MODE_NAME (wmode);

        p = suffix;
        for (q = widening ? nname : wname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = widening ? wname : nname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p++ = '2';
        *p = '\0';

        tab->handlers[widening ? wmode : nmode]
                     [widening ? nmode : wmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
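/* Example (illustration only): with WIDENING true,
   init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true)
   produces names such as "__extendsfdf2" (narrow mode first), while the
   non-widening trunc_optab case yields e.g. "__truncdfsf2" (wide mode
   first); both forms carry the trailing '2' operand count.  */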
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
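/* Illustrative sketch (not part of the original sources): a target's
   TARGET_INIT_LIBFUNCS hook may use these entry points to redirect an
   operation to a system routine, e.g.

     set_optab_libfunc (sdiv_optab, SImode, "__hypothetical_divsi3");
     set_conv_libfunc (sfloat_optab, DFmode, SImode, "__hypothetical_floatsidf");

   where the names are made up for the example; passing 0 instead of a name
   clears the entry so that no libcall is recorded for that mode.  */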
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  /* Conversions.  */
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movstr_optab[i] = CODE_FOR_nothing;
      clrstr_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs, signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  /* Conversions.  */
  init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix", MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns", MODE_FLOAT, MODE_INT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  bcopy_libfunc = init_one_libfunc ("bcopy");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
  memset_libfunc = init_one_libfunc ("memset");
  bzero_libfunc = init_one_libfunc ("bzero");
  setbits_libfunc = init_one_libfunc ("__setbits");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
                                            ? "_Unwind_SjLj_Resume"
                                            : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
  gcov_init_libfunc = init_one_libfunc ("__gcov_init");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}

#include "gt-optabs.h"