/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"

/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */
optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
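
/* Illustrative example (the mode and operands here are made up, not part of
   the original sources): after expanding something like "a + b" into a
   multi-insn sequence whose last insn sets TARGET, that insn ends up with a
   note of the form

       (expr_list:REG_EQUAL (plus:SI op0 op1) ...)

   built with gen_rtx_fmt_ee and attached via set_unique_reg_note below, so
   later passes can rediscover the overall operation.  */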
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
  rtx last_insn, insn, set;

      || NEXT_INSN (insns) == NULL_RTX)

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)

  if (GET_CODE (target) == ZERO_EXTRACT)

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))

  set = single_set (last_insn);

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))

      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)

          if (reg_set_p (target, insn))

          insn = PREV_INSN (insn);

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));

    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
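
/* Illustration (modes chosen arbitrarily): when widening an SImode operand
   to DImode for an AND or IOR, only the low 32 bits of the result are ever
   looked at, so NO_EXTEND may be nonzero and the high bits can be left as
   garbage.  For LSHIFTRT the high bits would shift down into the result, so
   a real zero or sign extension is required and NO_EXTEND must be 0.  */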
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
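
/* For example (as the switch below shows): TRUNC_MOD_EXPR maps to
   umod_optab or smod_optab depending on TYPE_UNSIGNED (type), and with
   -ftrapv a PLUS_EXPR on a signed integral type maps to addv_optab rather
   than add_optab.  */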
optab_for_tree_code (enum tree_code code, tree type)

      return one_cmpl_optab;

      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);

      return trapv ? addv_optab : add_optab;

      return trapv ? subv_optab : sub_optab;

      return trapv ? smulv_optab : smul_optab;

      return trapv ? negv_optab : neg_optab;

      return trapv ? absv_optab : abs_optab;
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)

  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  if (ternary_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)

      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    temp = gen_reg_rtx (mode);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (! (*insn_data[icode].operand[3].predicate) (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
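
/* For instance (values are purely illustrative): if both operands are
   CONST_INTs, a call such as

       simplify_expand_binop (SImode, add_optab,
                              GEN_INT (2), GEN_INT (3), 0, 0, OPTAB_LIB_WIDEN)

   is folded by simplify_gen_binary below to (const_int 5) and no insns are
   emitted at all.  */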
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)

  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)

  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);

  emit_move_insn (target, x);
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
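
/* Worked example (purely illustrative, assuming 32-bit words): for a 64-bit
   shift by a count known to be >= 32, INTO_TARGET becomes OUTOF_INPUT
   shifted by SUPERWORD_OP1 (i.e. count - 32), while OUTOF_TARGET is simply
   filled with zeros, or with copies of the sign bit (OUTOF_INPUT >> 31) for
   an arithmetic right shift.  */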
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)

  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))

  if (outof_target != 0)

      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));

        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
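
/* Worked example (illustrative only; 32-bit words, left shift by a count N
   known to be in [1, 31]): the high half of the result is
   (INTO_INPUT << N) | (OUTOF_INPUT >> (32 - N)) and the low half is
   OUTOF_INPUT << N, which is exactly the "carries" computation and the
   final word_mode shift performed below.  */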
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)

  optab reverse_unsigned_shift, unsigned_shift;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)

      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,

      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)

          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,

          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,

  if (tmp == 0 || carries == 0)

  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)

  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)

      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))

      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
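
/* Concrete illustration (not part of the original comment; 32-bit words
   assumed): on a target whose word shifts truncate their count to 5 bits,
   SHIFT_MASK is 31 and the code below can decide between the subword and
   superword cases simply by testing OP1 & 32, since that bit is zero
   exactly when the effective shift is a subword shift.  */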
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)

  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && !CONSTANT_P (op1))

      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    unsignedp, methods, shift_mask))

      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)

      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,

      cmp2 = CONST0_RTX (op1_mode);

      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,

      cmp2 = CONST0_RTX (op1_mode);

      superword_op1 = cmp1;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)

      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,

        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */

    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))

    delete_insns_since (start);
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,

  emit_jump_insn (gen_jump (done_label));

  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))

  emit_label (done_label);
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)

  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
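
/* Illustrative call (the operand names are hypothetical, not from this
   file):

       rtx sum = expand_binop (SImode, add_optab, x, y,
                               target, 0, OPTAB_LIB_WIDEN);

   requests a signed SImode addition of X and Y, preferably into TARGET,
   falling back to wider modes or a library call if the machine has no
   SImode add pattern; the returned rtx holds the result and need not be
   TARGET itself.  */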
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)

  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;

  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))

      op0 = force_not_mem (op0);

      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)

      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))

      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))

      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)

      if (((target == 0 || REG_P (target))

           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;

      rtx xop0 = op0, xop1 = op1;

        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */

          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);

          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))

              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,

      delete_insns_since (last);
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))

      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);

            return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))

        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))

            rtx xop0 = op0, xop1 = op1;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);

                if (class != MODE_INT)

                    target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);

                  return gen_lowpart (mode, temp);

              delete_insns_since (last);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)

          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (target_piece != x)
            emit_move_insn (target_piece, x);

      insns = get_insns ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)

          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))

          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))

              insns = get_insns ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* Synthesize double word rotates from single word shifts.  */
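  /* Worked example (illustrative only, not from the original sources; 32-bit
     words, rotate left by a constant N with 0 < N < 32): the high word of the
     result is (high << N) | (low >> (32 - N)) and the low word is
     (low << N) | (high >> (32 - N)); a rotate by exactly 32 degenerates into
     the plain word swap handled specially below.  */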
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;

      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)

          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);

          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)

              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);

              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);

      insns = get_insns ();

      if (binoptab->code != UNKNOWN)
        equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);

      /* We can't make this a no conflict block if this is a word swap,
         because the word swap case fails if the input and output values
         are in the same register.  */
      if (shift_count != BITS_PER_WORD)
        emit_no_conflict_block (insns, target, op0, op1, equiv_value);

  /* These can be done a word at a time by propagating carries.  */
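  /* For intuition only (this is a sketch, not how the code below is
     literally written): a two-word addition is essentially

         low   = op0.low + op1.low;
         carry = (low < op0.low);      /- computed via emit_store_flag_force -/
         high  = op0.high + op1.high + carry;

     with an extra carry term when adding the carry itself overflows; the
     loop below does exactly this one word at a time, using ior_optab to
     combine the two possible carries.  */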
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)

      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))

      /* Indicate for flow that the entire target reg is being set.  */

      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)

          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab

                                                 word_mode, 1, normalizep);

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,

                                   NULL_RTX, 1, next_methods);

                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab

                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);

              emit_move_insn (target_piece, newx);

          carry_in = carry_out;

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))

              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,

                                   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);
  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:

	                          _______________________
	                         [__op0_high_|__op0_low__]
	                          _______________________
	                       * [__op1_high_|__op1_low__]
	        _______________________________________________

	                          _______________________
     (1)	                 [__op0_low__*__op1_low__]
	             _______________________
     (2a)	    [__op0_low__*__op1_high_]
	             _______________________
     (2b)	    [__op0_high_*__op1_low__]
	 _______________________
     (3) [__op0_high_*__op1_high_]

     This gives a 4-word result.  Since we are only interested in the
     lower 2 words, partial result (3) and the upper words of (2a) and
     (2b) don't need to be calculated.  Hence (2a) and (2b) can be
     calculated using non-widening multiplication.

     (1), however, needs to be calculated with an unsigned widening
     multiplication.  If this operation is not directly supported we
     try using a signed widening multiplication and adjust the result.
     This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
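
  /* In formula form (illustration only): writing W for BITS_PER_WORD and
     op0 = op0_high * 2**W + op0_low (likewise op1), the low 2*W bits of the
     product are

         (op0_low * op1_low
          + (op0_low * op1_high + op0_high * op1_low) * 2**W)  mod  2**(2*W),

     so only partial products (1), (2a) and (2b) above contribute; (3) only
     affects bits that are discarded, and only the low words of (2a) and
     (2b) matter.  */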
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))

      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);

      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && !REG_P (target)))

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */

            delete_insns_since (last);

            op0_xhigh = op0_high, op1_xhigh = op1_high;

          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)

          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);

              op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                          op0_xhigh, op0_xhigh, 0,

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);

            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);

              op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                        NULL_RTX, 0, next_methods);

                op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                          op1_xhigh, op1_xhigh, 0,

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)

          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

          if (!REG_P (product_high))
            product_high = force_reg (word_mode, product_high);

            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          emit_move_insn (operand_subword (product, high, 1, mode), product_high);

              if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

                  temp = emit_move_insn (product, product);
                  set_unique_reg_note (temp,

                                       gen_rtx_fmt_ee (MULT, mode,

      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         its place.  */

      delete_insns_since (last);
1596 Use a library call if one is available and caller says that's ok. */
1598 if (binoptab
->handlers
[(int) mode
].libfunc
1599 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1603 enum machine_mode op1_mode
= mode
;
1610 op1_mode
= word_mode
;
1611 /* Specify unsigned here,
1612 since negative shift counts are meaningless. */
1613 op1x
= convert_to_mode (word_mode
, op1
, 1);
1616 if (GET_MODE (op0
) != VOIDmode
1617 && GET_MODE (op0
) != mode
)
1618 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1620 /* Pass 1 for NO_QUEUE so we don't lose any increments
1621 if the libcall is cse'd or moved. */
1622 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1623 NULL_RTX
, LCT_CONST
, mode
, 2,
1624 op0
, mode
, op1x
, op1_mode
);
1626 insns
= get_insns ();
1629 target
= gen_reg_rtx (mode
);
1630 emit_libcall_block (insns
, target
, value
,
1631 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1636 delete_insns_since (last
);
1638 /* It can't be done in this mode. Can we do it in a wider mode? */
1640 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1641 || methods
== OPTAB_MUST_WIDEN
))
1643 /* Caller says, don't even try. */
1644 delete_insns_since (entry_last
);
1648 /* Compute the value of METHODS to pass to recursive calls.
1649 Don't allow widening to be tried recursively. */
1651 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1653 /* Look for a wider mode of the same class for which it appears we can do
1656 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1658 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1659 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1661 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1662 != CODE_FOR_nothing
)
1663 || (methods
== OPTAB_LIB
1664 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1666 rtx xop0
= op0
, xop1
= op1
;
1669 /* For certain integer operations, we need not actually extend
1670 the narrow operands, as long as we will truncate
1671 the results to the same narrowness. */
1673 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1674 || binoptab
== xor_optab
1675 || binoptab
== add_optab
|| binoptab
== sub_optab
1676 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1677 && class == MODE_INT
)
1680 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1681 unsignedp
, no_extend
);
1683 /* The second operand of a shift must always be extended. */
1684 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1685 no_extend
&& binoptab
!= ashl_optab
);
1687 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1688 unsignedp
, methods
);
1691 if (class != MODE_INT
)
1694 target
= gen_reg_rtx (mode
);
1695 convert_move (target
, temp
, 0);
1699 return gen_lowpart (mode
, temp
);
1702 delete_insns_since (last
);
1707 delete_insns_since (entry_last
);
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
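
/* For instance (illustrative only): an unsigned SImode division whose
   operands have been zero-extended into DImode can safely use a signed
   DImode division, because the extended operands are non-negative and the
   low SImode word of the quotient is identical either way.  */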
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)

  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);

  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,

  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

    op0 = force_not_mem (op0);

    targ0 = gen_reg_rtx (mode);

    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[2].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[1].predicate) (targ1, mode))

      pat = GEN_FCN (icode) (targ0, targ1, xop0);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))

          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)

              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))

                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);

              delete_insns_since (last);

  delete_insns_since (entry_last);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,

  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

    op0 = force_not_mem (op0);
    op1 = force_not_mem (op1);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

    targ0 = gen_reg_rtx (mode);

    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)

      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;

      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);

      delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)

      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))

          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)

              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,

                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);

              delete_insns_since (last);

  delete_insns_since (entry_last);
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
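/* Sketch of the assumed layout: for an SImode operation the library
   routine is treated as returning a DImode value holding both results
   side by side; the half at byte offset 0 is taken for TARG0 and the
   other half for TARG1, which is what the simplify_gen_subreg call
   below extracts.  The concrete modes here are only illustrative.  */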
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  if (!((targ0 != NULL_RTX) ^ (targ1 != NULL_RTX)))
    abort ();

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return 0;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);

  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2, op0, mode, op1, mode);

  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();

  /* Move the value into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating (clz:narrow x) as
   (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
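/* For example, with a QImode operand done in SImode (8 and 32 bits):
   clz:QI (x) == clz:SI (zero_extend:SI (x)) - (32 - 8), since the
   zero-extension contributes exactly 24 extra leading zero bits.  */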
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      temp = expand_binop (wider_mode, sub_optab, temp,
				   GEN_INT (GET_MODE_BITSIZE (wider_mode)
					    - GET_MODE_BITSIZE (mode)),
				   target, true, OPTAB_DIRECT);

	      delete_insns_since (last);
	    }
	}
    }
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
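/* For example, parity (0b1011) == popcount (0b1011) & 1 == 3 & 1 == 1,
   and parity (0b0110) == 2 & 1 == 0.  */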
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				   target, true, OPTAB_DIRECT);

	      delete_insns_since (last);
	    }
	}
    }
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */
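/* A minimal sketch of the intended use, assuming IMODE is an integer mode
   carrying a bit pattern that must end up viewed in MODE (as in the
   sign-bit tricks below):

     temp = expand_binop (imode, xor_optab, ..., NULL_RTX, 1, OPTAB_LIB_WIDEN);
     temp = lowpart_subreg_maybe_copy (mode, temp, imode);
     emit_move_insn (target, temp);
*/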
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  ret = lowpart_subreg (omode, val, imode);

  val = force_reg (imode, val);
  ret = lowpart_subreg (omode, val, imode);
  gcc_assert (ret != NULL);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  op0 = force_not_mem (op0);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;

      temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);

      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
	{
	  delete_insns_since (last);
	  return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	}

      delete_insns_since (last);
    }
  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
    }

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
	  {
	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && class == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (class != MODE_INT)
	      {
		target = gen_reg_rtx (mode);
		convert_move (target, temp, 0);
	      }
	    else
	      return gen_lowpart (mode, temp);

	    delete_insns_since (last);
	  }
      }
  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
    }
  /* Try negating floating point values by flipping the sign bit.  */
  if (unoptab->code == NEG && class == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
    {
      const struct real_format *fmt = REAL_MODE_FORMAT (mode);
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
	{
	  HOST_WIDE_INT hi, lo;
	  rtx last = get_last_insn ();

	  /* Handle targets with different FP word orders.  */
	  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	    {
	      int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
	      int word = nwords - (bitpos / BITS_PER_WORD) - 1;
	      bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
	    }

	  if (bitpos < HOST_BITS_PER_WIDE_INT)
	    lo = (HOST_WIDE_INT) 1 << bitpos;
	  else
	    hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);

	  temp = expand_binop (imode, xor_optab,
			       gen_lowpart (imode, op0),
			       immed_double_const (lo, hi, imode),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  target = gen_reg_rtx (mode);
	  temp = lowpart_subreg_maybe_copy (mode, temp, imode);
	  insn = emit_move_insn (target, temp);
	  set_unique_reg_note (insn, REG_EQUAL,
			       gen_rtx_fmt_e (NEG, mode,
					      copy_rtx (op0)));

	  delete_insns_since (last);
	}
    }
  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
    }

  /* If there is no negation pattern, try subtracting from zero.  */
  if (unoptab == neg_optab && class == MODE_INT)
    {
      temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_DIRECT);
    }
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == popcount_optab || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unoptab->code, mode, op0));
    }
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((unoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || unoptab->handlers[(int) wider_mode].libfunc)
	    {
	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */

	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab)
				    && class == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  */
	      if (unoptab == clz_optab && temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);

	      if (class != MODE_INT)
		{
		  target = gen_reg_rtx (mode);
		  convert_move (target, temp, 0);
		}
	      else
		return gen_lowpart (mode, temp);

	      delete_insns_since (last);
	    }
	}
    }
  /* If there is no negate operation, try doing a subtract from zero.
     The US Software GOFAST library needs this.  FIXME: This is *wrong*
     for floating-point operations due to negative zeros!  */
  if (unoptab->code == NEG)
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
    }
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);

  /* For floating point modes, try clearing the sign bit.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
    {
      const struct real_format *fmt = REAL_MODE_FORMAT (mode);
      enum machine_mode imode = int_mode_for_mode (mode);
      int bitpos = (fmt != 0) ? fmt->signbit : -1;

      if (imode != BLKmode && bitpos >= 0)
	{
	  HOST_WIDE_INT hi, lo;
	  rtx last = get_last_insn ();

	  /* Handle targets with different FP word orders.  */
	  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	    {
	      int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
	      int word = nwords - (bitpos / BITS_PER_WORD) - 1;
	      bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
	    }

	  if (bitpos < HOST_BITS_PER_WIDE_INT)
	    lo = (HOST_WIDE_INT) 1 << bitpos;
	  else
	    hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);

	  temp = expand_binop (imode, and_optab,
			       gen_lowpart (imode, op0),
			       immed_double_const (~lo, ~hi, imode),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  target = gen_reg_rtx (mode);
	  temp = lowpart_subreg_maybe_copy (mode, temp, imode);
	  insn = emit_move_insn (target, temp);
	  set_unique_reg_note (insn, REG_EQUAL,
			       gen_rtx_fmt_e (ABS, mode,
					      copy_rtx (op0)));

	  delete_insns_since (last);
	}
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			   OPTAB_WIDEN);

      delete_insns_since (last);
    }
  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
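  /* For example, for a 32-bit x == -5:
     t = x >> 31 == -1;  (x ^ t) == 4;  (x ^ t) - t == 4 - (-1) == 5.
     For nonnegative x the shift yields 0 and the value is unchanged.  */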
  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			   temp, extended, target, 0, OPTAB_LIB_WIDEN);
    }
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
				  NULL_RTX, op1);
  else
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			     NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  emit_move_insn (target, op0);
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* Sign and zero extension from memory is often done specially on
     RISC machines, so forcing into a register here can pessimize
     code.  */
  if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
    op0 = force_not_mem (op0);

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
      || (flag_force_mem && MEM_P (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_move_insn (target, temp);
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
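/* The emitted shape is roughly the following, for a two-word TARGET
   (a sketch of the documented layout above, not literal RTL):

     (clobber TARGET)
     (set (word 0 of TARGET) ...)   ;; carries REG_NO_CONFLICT notes for OP0/OP1
     (set (word 1 of TARGET) ...)   ;; likewise
     (set TARGET TARGET)            ;; carries REG_EQUAL = EQUIV

   with the whole group bracketed by REG_LIBCALL / REG_RETVAL notes.  */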
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    if (!NONJUMP_INSN_P (insn)
	|| find_reg_note (insn, REG_LIBCALL, NULL_RTX))
      return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and
     remove these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER)
	set = PATTERN (insn);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	{
	  for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      set = XVECEXP (PATTERN (insn), 0, i);
	}

      if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;

	  PREV_INSN (next) = PREV_INSN (insn);
	}
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (op1 && REG_P (op1))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && REG_P (op0))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  first = get_insns ();

  first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
					 REG_NOTES (first));
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */
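/* A typical use, as in the libcall expanders earlier in this file
   (a sketch of the usual sequence, not copied from one caller):

     start_sequence ();
     value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				      mode, 2, op0, mode, op1, mode);
     insns = get_insns ();
     end_sequence ();
     emit_libcall_block (insns, target, value,
			 gen_rtx_fmt_ee (code, mode, op0, op1));
*/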
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	    remove_note (insn, note);
	}
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      {
	rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	XEXP (note, 0) = constm1_rtx;

	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
					      REG_NOTES (insn));
      }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	  && ((! INSN_P (insns)
	       || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
	      && ! reg_used_between_p (SET_DEST (set), insns, insn)
	      && ! modified_in_p (SET_SRC (set), insns)
	      && ! modified_between_p (SET_SRC (set), insns, insn)))
	{
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;

	  PREV_INSN (next) = PREV_INSN (insn);
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  first = get_insns ();

  first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes
	 when the encapsulated region would not be in one basic block,
	 i.e. when there is a control_flow_insn_p insn between FIRST and
	 LAST.  */
      bool attach_libcall_retval_notes = true;
      next = NEXT_INSN (last);
      for (insn = first; insn != next; insn = NEXT_INSN (insn))
	if (control_flow_insn_p (insn))
	  {
	    attach_libcall_retval_notes = false;
	    break;
	  }

      if (attach_libcall_retval_notes)
	{
	  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
						 REG_NOTES (first));
	  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
						REG_NOTES (last));
	}
    }
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */
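/* For example, a caller deciding how to branch might ask

     if (can_compare_p (GE, mode, ccp_jump))
       ... emit a single compare-and-branch ...
     else
       ... fall back to a word-by-word or library comparison ...

   which is how expand_abs uses it above.  */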
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);
/* This function is called when we are going to emit a compare instruction
   that compares the values found in *PX and *PY, using the rtl operator
   COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  */
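/* The expected calling pattern is roughly

     prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		       ccp_jump);
     emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);

   as emit_cmp_and_jump_insns does below.  */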
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  /* They could both be VOIDmode if both args are immediate constants,
     but we should fold that at an earlier stage.
     With no special code here, this will call abort,
     reminding the programmer to implement such folding.  */

  if (mode != BLKmode && flag_force_mem)
    {
      /* Load duplicate non-volatile operands once.  */
      if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
	{
	  x = force_not_mem (x);
	}
      else
	{
	  x = force_not_mem (x);
	  y = force_not_mem (y);
	}
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

  /* Abort if we have a non-canonical comparison.  The RTL documentation
     states that canonical comparisons are required only for targets which
     have cc0.  */
  if (CONSTANT_P (x) && ! CONSTANT_P (y))
    abort ();

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;

      opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *pmode = result_mode;
	  return;
	}

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);

      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      x = force_reg (mode, x);

      y = force_reg (mode, y);
    }

  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
	libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					word_mode, 2, x, mode, y, mode);

      if (TARGET_LIB_INT_CMP_BIASED)
	/* Integer comparison returns a result that must be compared
	   against 1, so that even if we do an unsigned compare
	   afterward, there is still a value that can represent the
	   result "less than".  */
	;
    }

  if (class == MODE_FLOAT)
    prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (! (*insn_data[icode].operand[opnum].predicate)
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

      if (icode != CODE_FOR_nothing
	  && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
	{
	  x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	  emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	  return;
	}

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
	  return;
	}

      if (class != MODE_INT && class != MODE_FLOAT
	  && class != MODE_COMPLEX_FLOAT)
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */
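/* For example, to branch to LABEL when X is less than Y as signed SImode
   values (a sketch; the names are only illustrative):

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 0, label);
*/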
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
	 is out of sync.  */
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants.  Force
     X into a register to avoid aborting in emit_cmp_insn due to non-canonical
     RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
		       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);

  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;

  bool reversed_p = false;

  for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
	break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
	{
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;
	  break;
	}

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
	  && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
	{
	  comparison = reversed;
	  break;
	}
    }

  if (mode == VOIDmode)
    abort ();

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	{
	  rtx true_rtx, false_rtx;

	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;

	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;

	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;

	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;

	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;

	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;

	  equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
					equiv, true_rtx, false_rtx);
	}
    }

  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *pcomparison = comparison;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

emit_indirect_jump (rtx loc)
{
  if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
	 (loc, Pmode)))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       enum machine_mode cmode, rtx op2, rtx op3,
		       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    ;

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  op2 = force_not_mem (op2);
  op3 = force_not_mem (op3);

  target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  if (subtarget != target)
    convert_move (target, subtarget, 0);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integers values based on fp
   comparisons, and vice versa.  How do we handle them?  */

can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;
}

#endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for
   that condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    ;

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  op2 = force_not_mem (op2);
  op3 = force_not_mem (op3);

  target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (! (*insn_data[icode].operand[0].predicate)
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (! (*insn_data[icode].operand[2].predicate)
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (! (*insn_data[icode].operand[3].predicate)
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */

  if (subtarget != target)
    convert_move (target, subtarget, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
	    (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
have_add2_insn (rtx x, rtx y)
{
  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    return 0;
}
/* Generate and return an insn body to subtract Y from X.  */

gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    abort ();

  return (GEN_FCN (icode) (x, x, y));
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
	    (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return (GEN_FCN (icode) (r0, r1, c));
}
have_sub2_insn (rtx x, rtx y)
{
  if (GET_MODE (x) == VOIDmode)
    abort ();

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    return 0;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

gen_move_insn (rtx x, rtx y)
{
  emit_move_insn_1 (x, y);
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
#ifdef HAVE_ptr_extend
  return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    return icode;

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    return icode;

  return CODE_FOR_nothing;
}
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */
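/* For example, converting the unsigned 32-bit value 0xFFFFFFFF with a
   signed float insn first yields -1.0; the correction then adds 2**32,
   giving the intended 4294967295.0.  The numbers here only illustrate
   the "correct the final value" strategy described above.  */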
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)
    abort ();

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
	  icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    convert_move (to, target, 0);
	  }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  {
    rtx label = gen_label_rtx ();
    REAL_VALUE_TYPE offset;

    from = force_not_mem (from);

    /* Look for a usable floating mode FMODE wider than the source and at
       least as wide as the target.  Using FMODE will avoid rounding woes
       with unsigned values greater than the signed maximum value.  */

    for (fmode = GET_MODE (to); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	  && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	break;

    if (fmode == VOIDmode)
      {
	/* There is no such mode.  Pretend the target is wide enough.  */
	fmode = GET_MODE (to);

	/* Avoid double-rounding when TO is narrower than FROM.  */
	if ((significand_size (fmode) + 1)
	    < GET_MODE_BITSIZE (GET_MODE (from)))
	  {
	    rtx neglabel = gen_label_rtx ();

	    /* Don't use TARGET if it isn't a register, is a hard register,
	       or is the wrong mode.  */
	    if (!REG_P (target)
		|| REGNO (target) < FIRST_PSEUDO_REGISTER
		|| GET_MODE (target) != fmode)
	      target = gen_reg_rtx (fmode);

	    imode = GET_MODE (from);
	    do_pending_stack_adjust ();

	    /* Test whether the sign bit is set.  */
	    emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				     0, neglabel);

	    /* The sign bit is not set.  Convert as signed.  */
	    expand_float (target, from, 0);
	    emit_jump_insn (gen_jump (label));

	    /* The sign bit is set.
	       Convert to a usable (positive signed) value by shifting right
	       one bit, while remembering if a nonzero bit was shifted
	       out; i.e., compute (from & 1) | (from >> 1).  */

	    emit_label (neglabel);
	    temp = expand_binop (imode, and_optab, from, const1_rtx,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
				  NULL_RTX, 1);
	    temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				 OPTAB_LIB_WIDEN);
	    expand_float (target, temp, 0);

	    /* Multiply by 2 to undo the shift above.  */
	    temp = expand_binop (fmode, add_optab, target, target,
				 target, 0, OPTAB_LIB_WIDEN);
	    emit_move_insn (target, temp);

	    do_pending_stack_adjust ();
	  }
      }

    /* If we are about to do some arithmetic to correct for an
       unsigned operand, do it in a pseudo-register.  */

    if (GET_MODE (to) != fmode
	|| !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER
) < FIRST_PSEUDO_REGISTER
)
4112 target
= gen_reg_rtx (fmode
);
4114 /* Convert as signed integer to floating. */
4115 expand_float (target
, from
, 0);
4117 /* If FROM is negative (and therefore TO is negative),
4118 correct its value by 2**bitwidth. */
4120 do_pending_stack_adjust ();
4121 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4125 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4126 temp
= expand_binop (fmode
, add_optab
, target
,
4127 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4128 target
, 0, OPTAB_LIB_WIDEN
);
4130 emit_move_insn (target
, temp
);
4132 do_pending_stack_adjust ();
4137 /* No hardware instruction available; call a library routine. */
4142 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4144 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4145 from
= convert_to_mode (SImode
, from
, unsignedp
);
4148 from
= force_not_mem (from
);
4150 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4156 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4157 GET_MODE (to
), 1, from
,
4159 insns
= get_insns ();
4162 emit_libcall_block (insns
, target
, value
,
4163 gen_rtx_FLOAT (GET_MODE (to
), from
));
4168 /* Copy result to requested destination
4169 if we have been computing in a temp location. */
4173 if (GET_MODE (target
) == GET_MODE (to
))
4174 emit_move_insn (to
, target
);
4176 convert_move (to
, target
, 0);
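
/* Illustrative sketch (not part of this file): the shift-and-or trick
   used above, written out in plain C for a 64-bit unsigned source and a
   `double' destination.  This only models the RTL the code emits; the
   function name is made up for illustration.  */
#if 0
double
u64_to_double_model (unsigned long long u)
{
  if ((long long) u >= 0)
    return (double) (long long) u;	/* Sign bit clear: convert as signed.  */

  /* Sign bit set: halve while keeping the lost low bit, convert, double.
     (u & 1) | (u >> 1) keeps the shifted-out bit so rounding is not
     affected.  */
  double d = (double) (long long) ((u & 1) | (u >> 1));
  return d + d;				/* Multiply by 2 to undo the shift.  */
}
#endif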
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FIX : FIX);
	    if (target != to)
	      convert_move (to, target, unsignedp);
	    return;
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code deals with inputs smaller than 2^63
     and does just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range
     2^63..2^64-1 inclusive (for any other input, overflow happens and the
     result is undefined).  So we know that the most significant bit set in
     the mantissa corresponds to 2^63.  The subtraction of 2^63 should not
     generate any rounding as it simply clears out that bit.  The rest is
     trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
					 &must_trunc))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit, lab1, lab2, insn;

	  bitsize = GET_MODE_BITSIZE (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (flag_force_mem)
	    from = force_not_mem (from);

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_unique_reg_note (insn,
				   REG_EQUAL,
				   gen_rtx_fmt_e (UNSIGNED_FIX,
						  GET_MODE (to),
						  copy_rtx (from)));
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      if (!libfunc)
	abort ();

      if (flag_force_mem)
	from = force_not_mem (from);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
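
/* Illustrative sketch (not part of this file): the compare/subtract/XOR
   sequence above, written out in plain C for a `double' source and a
   64-bit unsigned destination.  This only models the RTL the code emits;
   the function name is made up for illustration.  */
#if 0
unsigned long long
double_to_u64_model (double d)
{
  const double limit = 9223372036854775808.0;	/* 2**63 */

  if (d < limit)
    /* Hot path: the value fits in a signed fix.  */
    return (unsigned long long) (long long) d;

  /* 2**63 <= d < 2**64: clear the 2**63 contribution, convert as signed,
     then put the top bit back.  The XOR re-inserts the bit without any
     signed-overflow hazard.  */
  long long t = (long long) (d - limit);
  return (unsigned long long) t ^ (1ULL << 63);
}
#endif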
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}
static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
	op->handlers[i][j].insn_code = CODE_FOR_nothing;
	op->handlers[i][j].libfunc = 0;
      }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}
/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}
/* Conversion optabs never go in the code_to_optab table.  */

static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
	       const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
	*p++ = *q++;
      for (q = mname; *q; q++)
	*p++ = TOLOWER (*q);
      *p++ = suffix;

      optable->handlers[(int) mode].libfunc
	= init_one_libfunc (ggc_alloc_string (libfunc_name,
					      p - libfunc_name));
    }
}
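
/* For example (illustrative, following the rule above): on a typical
   32-bit target, OPNAME "add" with SUFFIX '3' yields "__addsi3" and
   "__adddf3", and OPNAME "neg" with SUFFIX '2' yields "__negdi2".  */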
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;

  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
		 mode_for_size (maxsize, MODE_INT, 0),
		 opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class from_class,
			       enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
	 tmode = GET_MODE_WIDER_MODE (tmode))
      {
	fname = GET_MODE_NAME (fmode);
	tname = GET_MODE_NAME (tmode);

	p = suffix;
	for (q = fname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = tname; *q; p++, q++)
	  *p = TOLOWER (*q);

	*p = '\0';

	tab->handlers[tmode][fmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						p - libfunc_name));
      }
}
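
/* For example (illustrative, following the rule above): OPNAME "float"
   from SImode to DFmode yields "__floatsidf", and OPNAME "fix" from
   DFmode to SImode yields "__fixdfsi".  */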
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */

static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
	 wmode = GET_MODE_WIDER_MODE (wmode))
      {
	nname = GET_MODE_NAME (nmode);
	wname = GET_MODE_NAME (wmode);

	p = suffix;
	for (q = widening ? nname : wname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = widening ? wname : nname; *q; p++, q++)
	  *p = TOLOWER (*q);

	*p++ = '2';
	*p = '\0';

	tab->handlers[widening ? wmode : nmode]
	  [widening ? nmode : wmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						p - libfunc_name));
      }
}
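
/* For example (illustrative, following the rule above): widening
   "extend" from SFmode to DFmode yields "__extendsfdf2", while narrowing
   "trunc" from DFmode to SFmode yields "__truncdfsf2".  */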
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except that this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}
/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
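
/* Illustrative sketch (not part of this file): a target's
   targetm.init_libfuncs hook typically overrides individual entries with
   the two helpers above.  All names below are hypothetical.  */
#if 0
static void
example_target_init_libfuncs (void)
{
  /* Use a vendor-provided routine instead of the default "__divsi3".  */
  set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");

  /* Use a different SFmode -> SImode fix routine.  */
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__example_fixsfsi");

  /* Clear an entry so no library call is recorded for it.  */
  set_optab_libfunc (ftrunc_optab, DFmode, 0);
}
#endif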
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  unsigned int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }

  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  drem_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);

  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      clrmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs,
     signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  init_interclass_conv_libfuncs (sfloat_optab, "float",
				 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
				 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
				 MODE_FLOAT, MODE_INT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
					    ? "_Unwind_SjLj_Resume"
					    : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	optab o;
	struct optab_handlers *h;

	o = optab_table[i];
	h = &o->handlers[j];
	if (h->libfunc)
	  {
	    if (GET_CODE (h->libfunc) != SYMBOL_REF)
	      abort ();
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (o->code),
		     GET_MODE_NAME (j),
		     XSTR (h->libfunc, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) CTI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  convert_optab o;
	  struct optab_handlers *h;

	  o = &convert_optab_table[i];
	  h = &o->handlers[j][k];
	  if (h->libfunc)
	    {
	      if (GET_CODE (h->libfunc) != SYMBOL_REF)
		abort ();
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (o->code),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (h->libfunc, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
	       rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      abort ();
    }
  return code;
}
/* Return a comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  if (!COMPARISON_CLASS_P (cond))
    {
      /* This is unlikely.  While generating VEC_COND_EXPR, the
	 auto-vectorizer ensures that the condition is a relational
	 operation.  */
      abort ();
    }

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);

  if (!(*insn_data[icode].operand[4].predicate) (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!(*insn_data[icode].operand[5].predicate) (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}
/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in VMODE mode.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* Get the comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
				   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them into registers, if required.  */
  rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
			 NULL_RTX, VOIDmode, 1);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
			 NULL_RTX, VOIDmode, 1);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit the instruction.  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
			      comparison, cc_op0, cc_op1));

  return target;
}
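
/* Illustrative note (not part of this file): at the source level a
   VEC_COND_EXPR such as  r = (a > b) ? x : y  on vectors selects, lane by
   lane, x[i] where a[i] > b[i] and y[i] otherwise.  The vcond/vcondu named
   patterns receive the two selected operands plus the comparison and its
   two sides, exactly as passed to GEN_FCN above; vcondu is used when the
   expression's type is unsigned.  */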
#include "gt-optabs.h"