1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
151 || NEXT_INSN (insns
) == NULL_RTX
)
154 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
155 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
156 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
157 && GET_RTX_CLASS (code
) != RTX_COMPARE
158 && GET_RTX_CLASS (code
) != RTX_UNARY
)
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
194 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
242 /* Return the optab used for computing the operation given by
243 the tree code, CODE. This function is not always usable (for
244 example, it cannot give complete results for multiplication
245 or division) but probably ought to be relied on more widely
246 throughout the expander. */
248 optab_for_tree_code (enum tree_code code
, tree type
)
260 return one_cmpl_optab
;
269 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
277 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
283 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
292 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
295 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
297 case REALIGN_LOAD_EXPR
:
298 return vec_realign_load_optab
;
304 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
308 return trapv
? addv_optab
: add_optab
;
311 return trapv
? subv_optab
: sub_optab
;
314 return trapv
? smulv_optab
: smul_optab
;
317 return trapv
? negv_optab
: neg_optab
;
320 return trapv
? absv_optab
: abs_optab
;
328 /* Generate code to perform an operation specified by TERNARY_OPTAB
329 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
331 UNSIGNEDP is for the case where we have to widen the operands
332 to perform the operation. It says to use zero-extension.
334 If TARGET is nonzero, the value
335 is generated there, if it is convenient to do so.
336 In all cases an rtx is returned for the locus of the value;
337 this may or may not be TARGET. */
340 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
341 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
343 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
344 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
345 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
346 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
349 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
351 if (ternary_optab
->handlers
[(int) mode
].insn_code
== CODE_FOR_nothing
)
355 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
356 temp
= gen_reg_rtx (mode
);
360 /* In case the insn wants input operands in modes different from
361 those of the actual operands, convert the operands. It would
362 seem that we don't need to convert CONST_INTs, but we do, so
363 that they're properly zero-extended, sign-extended or truncated
366 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
367 xop0
= convert_modes (mode0
,
368 GET_MODE (op0
) != VOIDmode
373 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
374 xop1
= convert_modes (mode1
,
375 GET_MODE (op1
) != VOIDmode
380 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
381 xop2
= convert_modes (mode2
,
382 GET_MODE (op2
) != VOIDmode
387 /* Now, if insn's predicates don't allow our operands, put them into
390 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
391 && mode0
!= VOIDmode
)
392 xop0
= copy_to_mode_reg (mode0
, xop0
);
394 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
395 && mode1
!= VOIDmode
)
396 xop1
= copy_to_mode_reg (mode1
, xop1
);
398 if (! (*insn_data
[icode
].operand
[3].predicate
) (xop2
, mode2
)
399 && mode2
!= VOIDmode
)
400 xop2
= copy_to_mode_reg (mode2
, xop2
);
402 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
409 /* Like expand_binop, but return a constant rtx if the result can be
410 calculated at compile time. The arguments and return value are
411 otherwise the same as for expand_binop. */
414 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
415 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
416 enum optab_methods methods
)
418 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
420 rtx x
= simplify_binary_operation (binoptab
->code
, mode
, op0
, op1
);
426 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
429 /* Like simplify_expand_binop, but always put the result in TARGET.
430 Return true if the expansion succeeded. */
433 force_expand_binop (enum machine_mode mode
, optab binoptab
,
434 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
435 enum optab_methods methods
)
437 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
438 target
, unsignedp
, methods
);
442 emit_move_insn (target
, x
);
446 /* This subroutine of expand_doubleword_shift handles the cases in which
447 the effective shift value is >= BITS_PER_WORD. The arguments and return
448 value are the same as for the parent routine, except that SUPERWORD_OP1
449 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
450 INTO_TARGET may be null if the caller has decided to calculate it. */
453 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
454 rtx outof_target
, rtx into_target
,
455 int unsignedp
, enum optab_methods methods
)
457 if (into_target
!= 0)
458 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
459 into_target
, unsignedp
, methods
))
462 if (outof_target
!= 0)
464 /* For a signed right shift, we must fill OUTOF_TARGET with copies
465 of the sign bit, otherwise we must fill it with zeros. */
466 if (binoptab
!= ashr_optab
)
467 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
469 if (!force_expand_binop (word_mode
, binoptab
,
470 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
471 outof_target
, unsignedp
, methods
))
477 /* This subroutine of expand_doubleword_shift handles the cases in which
478 the effective shift value is < BITS_PER_WORD. The arguments and return
479 value are the same as for the parent routine. */
482 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
483 rtx outof_input
, rtx into_input
, rtx op1
,
484 rtx outof_target
, rtx into_target
,
485 int unsignedp
, enum optab_methods methods
,
486 unsigned HOST_WIDE_INT shift_mask
)
488 optab reverse_unsigned_shift
, unsigned_shift
;
491 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
492 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
494 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
495 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
496 the opposite direction to BINOPTAB. */
497 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
499 carries
= outof_input
;
500 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
501 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
506 /* We must avoid shifting by BITS_PER_WORD bits since that is either
507 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
508 has unknown behavior. Do a single shift first, then shift by the
509 remainder. It's OK to use ~OP1 as the remainder if shift counts
510 are truncated to the mode size. */
511 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
512 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
513 if (shift_mask
== BITS_PER_WORD
- 1)
515 tmp
= immed_double_const (-1, -1, op1_mode
);
516 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
521 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
522 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
526 if (tmp
== 0 || carries
== 0)
528 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
529 carries
, tmp
, 0, unsignedp
, methods
);
533 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
534 so the result can go directly into INTO_TARGET if convenient. */
535 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
536 into_target
, unsignedp
, methods
);
540 /* Now OR in the bits carried over from OUTOF_INPUT. */
541 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
542 into_target
, unsignedp
, methods
))
545 /* Use a standard word_mode shift for the out-of half. */
546 if (outof_target
!= 0)
547 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
548 outof_target
, unsignedp
, methods
))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
617 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
618 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
619 input operand; the shift moves bits in the direction OUTOF_INPUT->
620 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
621 of the target. OP1 is the shift count and OP1_MODE is its mode.
622 If OP1 is constant, it will have been truncated as appropriate
623 and is known to be nonzero.
625 If SHIFT_MASK is zero, the result of word shifts is undefined when the
626 shift count is outside the range [0, BITS_PER_WORD). This routine must
627 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
629 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
630 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
631 fill with zeros or sign bits as appropriate.
633 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
634 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
635 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
636 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
639 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
640 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
641 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
642 function wants to calculate it itself.
644 Return true if the shift could be successfully synthesized. */
647 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
648 rtx outof_input
, rtx into_input
, rtx op1
,
649 rtx outof_target
, rtx into_target
,
650 int unsignedp
, enum optab_methods methods
,
651 unsigned HOST_WIDE_INT shift_mask
)
653 rtx superword_op1
, tmp
, cmp1
, cmp2
;
654 rtx subword_label
, done_label
;
655 enum rtx_code cmp_code
;
657 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
658 fill the result with sign or zero bits as appropriate. If so, the value
659 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
660 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
661 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
663 This isn't worthwhile for constant shifts since the optimizers will
664 cope better with in-range shift counts. */
665 if (shift_mask
>= BITS_PER_WORD
667 && !CONSTANT_P (op1
))
669 if (!expand_doubleword_shift (op1_mode
, binoptab
,
670 outof_input
, into_input
, op1
,
672 unsignedp
, methods
, shift_mask
))
674 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
675 outof_target
, unsignedp
, methods
))
680 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
681 is true when the effective shift value is less than BITS_PER_WORD.
682 Set SUPERWORD_OP1 to the shift count that should be used to shift
683 OUTOF_INPUT into INTO_TARGET when the condition is false. */
684 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
685 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
687 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
688 is a subword shift count. */
689 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
691 cmp2
= CONST0_RTX (op1_mode
);
697 /* Set CMP1 to OP1 - BITS_PER_WORD. */
698 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
700 cmp2
= CONST0_RTX (op1_mode
);
702 superword_op1
= cmp1
;
707 /* If we can compute the condition at compile time, pick the
708 appropriate subroutine. */
709 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
710 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
712 if (tmp
== const0_rtx
)
713 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
714 outof_target
, into_target
,
717 return expand_subword_shift (op1_mode
, binoptab
,
718 outof_input
, into_input
, op1
,
719 outof_target
, into_target
,
720 unsignedp
, methods
, shift_mask
);
723 #ifdef HAVE_conditional_move
724 /* Try using conditional moves to generate straight-line code. */
726 rtx start
= get_last_insn ();
727 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
728 cmp_code
, cmp1
, cmp2
,
729 outof_input
, into_input
,
731 outof_target
, into_target
,
732 unsignedp
, methods
, shift_mask
))
734 delete_insns_since (start
);
738 /* As a last resort, use branches to select the correct alternative. */
739 subword_label
= gen_label_rtx ();
740 done_label
= gen_label_rtx ();
743 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
744 0, 0, subword_label
);
747 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
748 outof_target
, into_target
,
752 emit_jump_insn (gen_jump (done_label
));
754 emit_label (subword_label
);
756 if (!expand_subword_shift (op1_mode
, binoptab
,
757 outof_input
, into_input
, op1
,
758 outof_target
, into_target
,
759 unsignedp
, methods
, shift_mask
))
762 emit_label (done_label
);
766 /* Wrapper around expand_binop which takes an rtx code to specify
767 the operation to perform, not an optab pointer. All other
768 arguments are the same. */
770 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
771 rtx op1
, rtx target
, int unsignedp
,
772 enum optab_methods methods
)
774 optab binop
= code_to_optab
[(int) code
];
778 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
781 /* Generate code to perform an operation specified by BINOPTAB
782 on operands OP0 and OP1, with result having machine-mode MODE.
784 UNSIGNEDP is for the case where we have to widen the operands
785 to perform the operation. It says to use zero-extension.
787 If TARGET is nonzero, the value
788 is generated there, if it is convenient to do so.
789 In all cases an rtx is returned for the locus of the value;
790 this may or may not be TARGET. */
793 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
794 rtx target
, int unsignedp
, enum optab_methods methods
)
796 enum optab_methods next_methods
797 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
798 ? OPTAB_WIDEN
: methods
);
799 enum mode_class
class;
800 enum machine_mode wider_mode
;
802 int commutative_op
= 0;
803 int shift_op
= (binoptab
->code
== ASHIFT
804 || binoptab
->code
== ASHIFTRT
805 || binoptab
->code
== LSHIFTRT
806 || binoptab
->code
== ROTATE
807 || binoptab
->code
== ROTATERT
);
808 rtx entry_last
= get_last_insn ();
811 class = GET_MODE_CLASS (mode
);
815 /* Load duplicate non-volatile operands once. */
816 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
818 op0
= force_not_mem (op0
);
823 op0
= force_not_mem (op0
);
824 op1
= force_not_mem (op1
);
828 /* If subtracting an integer constant, convert this into an addition of
829 the negated constant. */
831 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
833 op1
= negate_rtx (mode
, op1
);
834 binoptab
= add_optab
;
837 /* If we are inside an appropriately-short loop and we are optimizing,
838 force expensive constants into a register. */
839 if (CONSTANT_P (op0
) && optimize
840 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
842 if (GET_MODE (op0
) != VOIDmode
)
843 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
844 op0
= force_reg (mode
, op0
);
847 if (CONSTANT_P (op1
) && optimize
848 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
850 if (GET_MODE (op1
) != VOIDmode
)
851 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
852 op1
= force_reg (mode
, op1
);
855 /* Record where to delete back to if we backtrack. */
856 last
= get_last_insn ();
858 /* If operation is commutative,
859 try to make the first operand a register.
860 Even better, try to make it the same as the target.
861 Also try to make the last operand a constant. */
862 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
863 || binoptab
== smul_widen_optab
864 || binoptab
== umul_widen_optab
865 || binoptab
== smul_highpart_optab
866 || binoptab
== umul_highpart_optab
)
870 if (((target
== 0 || REG_P (target
))
874 : rtx_equal_p (op1
, target
))
875 || GET_CODE (op0
) == CONST_INT
)
883 /* If we can do it with a three-operand insn, do so. */
885 if (methods
!= OPTAB_MUST_WIDEN
886 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
888 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
889 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
890 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
892 rtx xop0
= op0
, xop1
= op1
;
897 temp
= gen_reg_rtx (mode
);
899 /* If it is a commutative operator and the modes would match
900 if we would swap the operands, we can save the conversions. */
903 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
904 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
908 tmp
= op0
; op0
= op1
; op1
= tmp
;
909 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
913 /* In case the insn wants input operands in modes different from
914 those of the actual operands, convert the operands. It would
915 seem that we don't need to convert CONST_INTs, but we do, so
916 that they're properly zero-extended, sign-extended or truncated
919 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
920 xop0
= convert_modes (mode0
,
921 GET_MODE (op0
) != VOIDmode
926 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
927 xop1
= convert_modes (mode1
,
928 GET_MODE (op1
) != VOIDmode
933 /* Now, if insn's predicates don't allow our operands, put them into
936 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
937 && mode0
!= VOIDmode
)
938 xop0
= copy_to_mode_reg (mode0
, xop0
);
940 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
941 && mode1
!= VOIDmode
)
942 xop1
= copy_to_mode_reg (mode1
, xop1
);
944 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
945 temp
= gen_reg_rtx (mode
);
947 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
950 /* If PAT is composed of more than one insn, try to add an appropriate
951 REG_EQUAL note to it. If we can't because TEMP conflicts with an
952 operand, call ourselves again, this time without a target. */
953 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
954 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
956 delete_insns_since (last
);
957 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
965 delete_insns_since (last
);
968 /* If this is a multiply, see if we can do a widening operation that
969 takes operands of this mode and makes a wider mode. */
971 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
972 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
973 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
974 != CODE_FOR_nothing
))
976 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
977 unsignedp
? umul_widen_optab
: smul_widen_optab
,
978 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
982 if (GET_MODE_CLASS (mode
) == MODE_INT
)
983 return gen_lowpart (mode
, temp
);
985 return convert_to_mode (mode
, temp
, unsignedp
);
989 /* Look for a wider mode of the same class for which we think we
990 can open-code the operation. Check for a widening multiply at the
991 wider mode as well. */
993 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
994 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
995 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
996 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
998 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
999 || (binoptab
== smul_optab
1000 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1001 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1002 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1003 != CODE_FOR_nothing
)))
1005 rtx xop0
= op0
, xop1
= op1
;
1008 /* For certain integer operations, we need not actually extend
1009 the narrow operands, as long as we will truncate
1010 the results to the same narrowness. */
1012 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1013 || binoptab
== xor_optab
1014 || binoptab
== add_optab
|| binoptab
== sub_optab
1015 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1016 && class == MODE_INT
)
1019 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1021 /* The second operand of a shift must always be extended. */
1022 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1023 no_extend
&& binoptab
!= ashl_optab
);
1025 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1026 unsignedp
, OPTAB_DIRECT
);
1029 if (class != MODE_INT
)
1032 target
= gen_reg_rtx (mode
);
1033 convert_move (target
, temp
, 0);
1037 return gen_lowpart (mode
, temp
);
1040 delete_insns_since (last
);
1044 /* These can be done a word at a time. */
1045 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1046 && class == MODE_INT
1047 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1048 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1054 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1055 won't be accurate, so use a new target. */
1056 if (target
== 0 || target
== op0
|| target
== op1
)
1057 target
= gen_reg_rtx (mode
);
1061 /* Do the actual arithmetic. */
1062 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1064 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1065 rtx x
= expand_binop (word_mode
, binoptab
,
1066 operand_subword_force (op0
, i
, mode
),
1067 operand_subword_force (op1
, i
, mode
),
1068 target_piece
, unsignedp
, next_methods
);
1073 if (target_piece
!= x
)
1074 emit_move_insn (target_piece
, x
);
1077 insns
= get_insns ();
1080 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1082 if (binoptab
->code
!= UNKNOWN
)
1084 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1085 copy_rtx (op0
), copy_rtx (op1
));
1089 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1094 /* Synthesize double word shifts from single word shifts. */
1095 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1096 || binoptab
== ashr_optab
)
1097 && class == MODE_INT
1098 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1099 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1100 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1101 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1102 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1104 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1105 enum machine_mode op1_mode
;
1107 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1108 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1109 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1111 /* Apply the truncation to constant shifts. */
1112 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1113 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1115 if (op1
== CONST0_RTX (op1_mode
))
1118 /* Make sure that this is a combination that expand_doubleword_shift
1119 can handle. See the comments there for details. */
1120 if (double_shift_mask
== 0
1121 || (shift_mask
== BITS_PER_WORD
- 1
1122 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1124 rtx insns
, equiv_value
;
1125 rtx into_target
, outof_target
;
1126 rtx into_input
, outof_input
;
1127 int left_shift
, outof_word
;
1129 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1130 won't be accurate, so use a new target. */
1131 if (target
== 0 || target
== op0
|| target
== op1
)
1132 target
= gen_reg_rtx (mode
);
1136 /* OUTOF_* is the word we are shifting bits away from, and
1137 INTO_* is the word that we are shifting bits towards, thus
1138 they differ depending on the direction of the shift and
1139 WORDS_BIG_ENDIAN. */
1141 left_shift
= binoptab
== ashl_optab
;
1142 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1144 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1145 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1147 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1148 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1150 if (expand_doubleword_shift (op1_mode
, binoptab
,
1151 outof_input
, into_input
, op1
,
1152 outof_target
, into_target
,
1153 unsignedp
, next_methods
, shift_mask
))
1155 insns
= get_insns ();
1158 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1159 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1166 /* Synthesize double word rotates from single word shifts. */
1167 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1168 && class == MODE_INT
1169 && GET_CODE (op1
) == CONST_INT
1170 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1171 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1172 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1175 rtx into_target
, outof_target
;
1176 rtx into_input
, outof_input
;
1178 int shift_count
, left_shift
, outof_word
;
1180 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1181 won't be accurate, so use a new target. Do this also if target is not
1182 a REG, first because having a register instead may open optimization
1183 opportunities, and second because if target and op0 happen to be MEMs
1184 designating the same location, we would risk clobbering it too early
1185 in the code sequence we generate below. */
1186 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1187 target
= gen_reg_rtx (mode
);
1191 shift_count
= INTVAL (op1
);
1193 /* OUTOF_* is the word we are shifting bits away from, and
1194 INTO_* is the word that we are shifting bits towards, thus
1195 they differ depending on the direction of the shift and
1196 WORDS_BIG_ENDIAN. */
1198 left_shift
= (binoptab
== rotl_optab
);
1199 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1201 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1202 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1204 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1205 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1207 if (shift_count
== BITS_PER_WORD
)
1209 /* This is just a word swap. */
1210 emit_move_insn (outof_target
, into_input
);
1211 emit_move_insn (into_target
, outof_input
);
1216 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1217 rtx first_shift_count
, second_shift_count
;
1218 optab reverse_unsigned_shift
, unsigned_shift
;
1220 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1221 ? lshr_optab
: ashl_optab
);
1223 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1224 ? ashl_optab
: lshr_optab
);
1226 if (shift_count
> BITS_PER_WORD
)
1228 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1229 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1233 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1234 second_shift_count
= GEN_INT (shift_count
);
1237 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1238 outof_input
, first_shift_count
,
1239 NULL_RTX
, unsignedp
, next_methods
);
1240 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1241 into_input
, second_shift_count
,
1242 NULL_RTX
, unsignedp
, next_methods
);
1244 if (into_temp1
!= 0 && into_temp2
!= 0)
1245 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1246 into_target
, unsignedp
, next_methods
);
1250 if (inter
!= 0 && inter
!= into_target
)
1251 emit_move_insn (into_target
, inter
);
1253 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1254 into_input
, first_shift_count
,
1255 NULL_RTX
, unsignedp
, next_methods
);
1256 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1257 outof_input
, second_shift_count
,
1258 NULL_RTX
, unsignedp
, next_methods
);
1260 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1261 inter
= expand_binop (word_mode
, ior_optab
,
1262 outof_temp1
, outof_temp2
,
1263 outof_target
, unsignedp
, next_methods
);
1265 if (inter
!= 0 && inter
!= outof_target
)
1266 emit_move_insn (outof_target
, inter
);
1269 insns
= get_insns ();
1279 /* These can be done a word at a time by propagating carries. */
1280 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1281 && class == MODE_INT
1282 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1283 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1286 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1287 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1288 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1289 rtx xop0
, xop1
, xtarget
;
1291 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1292 value is one of those, use it. Otherwise, use 1 since it is the
1293 one easiest to get. */
1294 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1295 int normalizep
= STORE_FLAG_VALUE
;
1300 /* Prepare the operands. */
1301 xop0
= force_reg (mode
, op0
);
1302 xop1
= force_reg (mode
, op1
);
1304 xtarget
= gen_reg_rtx (mode
);
1306 if (target
== 0 || !REG_P (target
))
1309 /* Indicate for flow that the entire target reg is being set. */
1311 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1313 /* Do the actual arithmetic. */
1314 for (i
= 0; i
< nwords
; i
++)
1316 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1317 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1318 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1319 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1322 /* Main add/subtract of the input operands. */
1323 x
= expand_binop (word_mode
, binoptab
,
1324 op0_piece
, op1_piece
,
1325 target_piece
, unsignedp
, next_methods
);
1331 /* Store carry from main add/subtract. */
1332 carry_out
= gen_reg_rtx (word_mode
);
1333 carry_out
= emit_store_flag_force (carry_out
,
1334 (binoptab
== add_optab
1337 word_mode
, 1, normalizep
);
1344 /* Add/subtract previous carry to main result. */
1345 newx
= expand_binop (word_mode
,
1346 normalizep
== 1 ? binoptab
: otheroptab
,
1348 NULL_RTX
, 1, next_methods
);
1352 /* Get out carry from adding/subtracting carry in. */
1353 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1354 carry_tmp
= emit_store_flag_force (carry_tmp
,
1355 (binoptab
== add_optab
1358 word_mode
, 1, normalizep
);
1360 /* Logical-ior the two poss. carry together. */
1361 carry_out
= expand_binop (word_mode
, ior_optab
,
1362 carry_out
, carry_tmp
,
1363 carry_out
, 0, next_methods
);
1367 emit_move_insn (target_piece
, newx
);
1370 carry_in
= carry_out
;
1373 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1375 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1376 || ! rtx_equal_p (target
, xtarget
))
1378 rtx temp
= emit_move_insn (target
, xtarget
);
1380 set_unique_reg_note (temp
,
1382 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1393 delete_insns_since (last
);
1396 /* If we want to multiply two two-word values and have normal and widening
1397 multiplies of single-word values, we can do this with three smaller
1398 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1399 because we are not operating on one word at a time.
1401 The multiplication proceeds as follows:
1402 _______________________
1403 [__op0_high_|__op0_low__]
1404 _______________________
1405 * [__op1_high_|__op1_low__]
1406 _______________________________________________
1407 _______________________
1408 (1) [__op0_low__*__op1_low__]
1409 _______________________
1410 (2a) [__op0_low__*__op1_high_]
1411 _______________________
1412 (2b) [__op0_high_*__op1_low__]
1413 _______________________
1414 (3) [__op0_high_*__op1_high_]
1417 This gives a 4-word result. Since we are only interested in the
1418 lower 2 words, partial result (3) and the upper words of (2a) and
1419 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1420 calculated using non-widening multiplication.
1422 (1), however, needs to be calculated with an unsigned widening
1423 multiplication. If this operation is not directly supported we
1424 try using a signed widening multiplication and adjust the result.
1425 This adjustment works as follows:
1427 If both operands are positive then no adjustment is needed.
1429 If the operands have different signs, for example op0_low < 0 and
1430 op1_low >= 0, the instruction treats the most significant bit of
1431 op0_low as a sign bit instead of a bit with significance
1432 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1433 with 2**BITS_PER_WORD - op0_low, and two's complements the
1434 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1437 Similarly, if both operands are negative, we need to add
1438 (op0_low + op1_low) * 2**BITS_PER_WORD.
1440 We use a trick to adjust quickly. We logically shift op0_low right
1441 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1442 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1443 logical shift exists, we do an arithmetic right shift and subtract
1446 if (binoptab
== smul_optab
1447 && class == MODE_INT
1448 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1449 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1450 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1451 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1452 != CODE_FOR_nothing
)
1453 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1454 != CODE_FOR_nothing
)))
1456 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1457 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1458 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1459 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1460 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1461 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1463 rtx op0_xhigh
= NULL_RTX
;
1464 rtx op1_xhigh
= NULL_RTX
;
1466 /* If the target is the same as one of the inputs, don't use it. This
1467 prevents problems with the REG_EQUAL note. */
1468 if (target
== op0
|| target
== op1
1469 || (target
!= 0 && !REG_P (target
)))
1472 /* Multiply the two lower words to get a double-word product.
1473 If unsigned widening multiplication is available, use that;
1474 otherwise use the signed form and compensate. */
1476 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1478 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1479 target
, 1, OPTAB_DIRECT
);
1481 /* If we didn't succeed, delete everything we did so far. */
1483 delete_insns_since (last
);
1485 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1489 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1490 != CODE_FOR_nothing
)
1492 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1493 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1494 target
, 1, OPTAB_DIRECT
);
1495 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1496 NULL_RTX
, 1, next_methods
);
1498 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1499 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1502 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1503 NULL_RTX
, 0, next_methods
);
1505 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1506 op0_xhigh
, op0_xhigh
, 0,
1510 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1511 NULL_RTX
, 1, next_methods
);
1513 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1514 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1517 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1518 NULL_RTX
, 0, next_methods
);
1520 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1521 op1_xhigh
, op1_xhigh
, 0,
1526 /* If we have been able to directly compute the product of the
1527 low-order words of the operands and perform any required adjustments
1528 of the operands, we proceed by trying two more multiplications
1529 and then computing the appropriate sum.
1531 We have checked above that the required addition is provided.
1532 Full-word addition will normally always succeed, especially if
1533 it is provided at all, so we don't worry about its failure. The
1534 multiplication may well fail, however, so we do handle that. */
1536 if (product
&& op0_xhigh
&& op1_xhigh
)
1538 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1539 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1540 NULL_RTX
, 0, OPTAB_DIRECT
);
1542 if (!REG_P (product_high
))
1543 product_high
= force_reg (word_mode
, product_high
);
1546 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1547 product_high
, 0, next_methods
);
1549 if (temp
!= 0 && temp
!= product_high
)
1550 emit_move_insn (product_high
, temp
);
1553 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1554 NULL_RTX
, 0, OPTAB_DIRECT
);
1557 temp
= expand_binop (word_mode
, add_optab
, temp
,
1558 product_high
, product_high
,
1561 if (temp
!= 0 && temp
!= product_high
)
1562 emit_move_insn (product_high
, temp
);
1564 emit_move_insn (operand_subword (product
, high
, 1, mode
), product_high
);
1568 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1570 temp
= emit_move_insn (product
, product
);
1571 set_unique_reg_note (temp
,
1573 gen_rtx_fmt_ee (MULT
, mode
,
1582 /* If we get here, we couldn't do it for some reason even though we
1583 originally thought we could. Delete anything we've emitted in
1586 delete_insns_since (last
);
1589 /* It can't be open-coded in this mode.
1590 Use a library call if one is available and caller says that's ok. */
1592 if (binoptab
->handlers
[(int) mode
].libfunc
1593 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1597 enum machine_mode op1_mode
= mode
;
1604 op1_mode
= word_mode
;
1605 /* Specify unsigned here,
1606 since negative shift counts are meaningless. */
1607 op1x
= convert_to_mode (word_mode
, op1
, 1);
1610 if (GET_MODE (op0
) != VOIDmode
1611 && GET_MODE (op0
) != mode
)
1612 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1614 /* Pass 1 for NO_QUEUE so we don't lose any increments
1615 if the libcall is cse'd or moved. */
1616 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1617 NULL_RTX
, LCT_CONST
, mode
, 2,
1618 op0
, mode
, op1x
, op1_mode
);
1620 insns
= get_insns ();
1623 target
= gen_reg_rtx (mode
);
1624 emit_libcall_block (insns
, target
, value
,
1625 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1630 delete_insns_since (last
);
1632 /* It can't be done in this mode. Can we do it in a wider mode? */
1634 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1635 || methods
== OPTAB_MUST_WIDEN
))
1637 /* Caller says, don't even try. */
1638 delete_insns_since (entry_last
);
1642 /* Compute the value of METHODS to pass to recursive calls.
1643 Don't allow widening to be tried recursively. */
1645 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1647 /* Look for a wider mode of the same class for which it appears we can do
1650 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1652 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1653 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1655 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1656 != CODE_FOR_nothing
)
1657 || (methods
== OPTAB_LIB
1658 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1660 rtx xop0
= op0
, xop1
= op1
;
1663 /* For certain integer operations, we need not actually extend
1664 the narrow operands, as long as we will truncate
1665 the results to the same narrowness. */
1667 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1668 || binoptab
== xor_optab
1669 || binoptab
== add_optab
|| binoptab
== sub_optab
1670 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1671 && class == MODE_INT
)
1674 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1675 unsignedp
, no_extend
);
1677 /* The second operand of a shift must always be extended. */
1678 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1679 no_extend
&& binoptab
!= ashl_optab
);
1681 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1682 unsignedp
, methods
);
1685 if (class != MODE_INT
)
1688 target
= gen_reg_rtx (mode
);
1689 convert_move (target
, temp
, 0);
1693 return gen_lowpart (mode
, temp
);
1696 delete_insns_since (last
);
1701 delete_insns_since (entry_last
);
1705 /* Expand a binary operator which has both signed and unsigned forms.
1706 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1709 If we widen unsigned operands, we may use a signed wider operation instead
1710 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): this extraction has dropped interior source lines (the
   embedded original line numbers jump, e.g. returns after the early
   tests are missing); the code below is kept byte-identical.  */
/* Visible strategy: try the signedness-matching optab directly, then
   widen through a copy of SOPTAB whose direct insn and libfunc are
   hidden, then a same-width library call, then a widened library call,
   stopping at each stage if METHODS forbids going further.  */
1713 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1714 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1715 enum optab_methods methods
)
/* The optab matching the signedness the caller requested.  */
1718 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1719 struct optab wide_soptab
;
1721 /* Do it without widening, if possible. */
1722 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1723 unsignedp
, OPTAB_DIRECT
)
1724 if (temp
|| methods
== OPTAB_DIRECT
)
1727 /* Try widening to a signed int. Make a fake signed optab that
1728 hides any signed insn for direct use. */
1729 wide_soptab
= *soptab
;
1730 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1731 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1733 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1734 unsignedp
, OPTAB_WIDEN
);
1736 /* For unsigned operands, try widening to an unsigned int. */
1737 if (temp
== 0 && unsignedp
)
1738 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1739 unsignedp
, OPTAB_WIDEN
);
1740 if (temp
|| methods
== OPTAB_WIDEN
)
1743 /* Use the right width lib call if that exists. */
1744 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1745 if (temp
|| methods
== OPTAB_LIB
)
1748 /* Must widen and use a lib call, use either signed or unsigned. */
1749 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1750 unsignedp
, methods
);
/* NOTE(review): the guard between the previous call and this final
   widened unsigned call (orig. lines 1751-1753) is missing here.  */
1754 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1755 unsignedp
, methods
);
1759 /* Generate code to perform an operation specified by UNOPTAB
1760 on operand OP0, with two results to TARG0 and TARG1.
1761 We assume that the order of the operands for the instruction
1762 is TARG0, TARG1, OP0.
1764 Either TARG0 or TARG1 may be zero, but what that means is that
1765 the result is not actually wanted. We will generate it into
1766 a dummy pseudo-reg and discard it. They may not both be zero.
1768 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): interior source lines were dropped by the extraction
   (embedded original line numbers jump); code kept byte-identical.  */
1771 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1774 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1775 enum mode_class
class;
1776 enum machine_mode wider_mode
;
/* Remember the insn stream position on entry so total failure can
   discard everything emitted here.  */
1777 rtx entry_last
= get_last_insn ();
1780 class = GET_MODE_CLASS (mode
);
1783 op0
= force_not_mem (op0
);
/* Unwanted results still need somewhere to go: fresh pseudos
   (the guarding tests were dropped by the extraction).  */
1786 targ0
= gen_reg_rtx (mode
);
1788 targ1
= gen_reg_rtx (mode
);
1790 /* Record where to go back to if we fail. */
1791 last
= get_last_insn ();
/* First choice: a direct insn for this mode.  */
1793 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1795 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1796 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1800 if (GET_MODE (xop0
) != VOIDmode
1801 && GET_MODE (xop0
) != mode0
)
1802 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1804 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1805 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop0
, mode0
))
1806 xop0
= copy_to_mode_reg (mode0
, xop0
);
1808 /* We could handle this, but we should always be called with a pseudo
1809 for our targets and all insns should take them as outputs. */
1810 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1811 || ! (*insn_data
[icode
].operand
[1].predicate
) (targ1
, mode
))
1814 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
/* Direct attempt failed: roll back to the recorded position.  */
1821 delete_insns_since (last
);
1824 /* It can't be done in this mode. Can we do it in a wider mode? */
1826 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1828 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1829 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1831 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1832 != CODE_FOR_nothing
)
1834 rtx t0
= gen_reg_rtx (wider_mode
);
1835 rtx t1
= gen_reg_rtx (wider_mode
);
1836 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
/* Recurse in the wider mode, then narrow both results back.  */
1838 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1840 convert_move (targ0
, t0
, unsignedp
);
1841 convert_move (targ1
, t1
, unsignedp
);
1845 delete_insns_since (last
);
/* Complete failure: discard everything emitted since entry.  */
1850 delete_insns_since (entry_last
);
1854 /* Generate code to perform an operation specified by BINOPTAB
1855 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1856 We assume that the order of the operands for the instruction
1857 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1858 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1860 Either TARG0 or TARG1 may be zero, but what that means is that
1861 the result is not actually wanted. We will generate it into
1862 a dummy pseudo-reg and discard it. They may not both be zero.
1864 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): interior source lines were dropped by the extraction
   (embedded original line numbers jump); code kept byte-identical.  */
1867 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
1870 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1871 enum mode_class
class;
1872 enum machine_mode wider_mode
;
/* Entry position, so a total failure can discard all emitted insns.  */
1873 rtx entry_last
= get_last_insn ();
1876 class = GET_MODE_CLASS (mode
);
1880 op0
= force_not_mem (op0
);
1881 op1
= force_not_mem (op1
);
1884 /* If we are inside an appropriately-short loop and we are optimizing,
1885 force expensive constants into a register. */
1886 if (CONSTANT_P (op0
) && optimize
1887 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1888 op0
= force_reg (mode
, op0
);
1890 if (CONSTANT_P (op1
) && optimize
1891 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1892 op1
= force_reg (mode
, op1
);
/* Unwanted results still get fresh pseudos (guards dropped in
   extraction).  */
1895 targ0
= gen_reg_rtx (mode
);
1897 targ1
= gen_reg_rtx (mode
);
1899 /* Record where to go back to if we fail. */
1900 last
= get_last_insn ();
/* First choice: a direct insn for this mode.  */
1902 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1904 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1905 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1906 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1908 rtx xop0
= op0
, xop1
= op1
;
1910 /* In case the insn wants input operands in modes different from
1911 those of the actual operands, convert the operands. It would
1912 seem that we don't need to convert CONST_INTs, but we do, so
1913 that they're properly zero-extended, sign-extended or truncated
1916 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1917 xop0
= convert_modes (mode0
,
1918 GET_MODE (op0
) != VOIDmode
1923 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1924 xop1
= convert_modes (mode1
,
1925 GET_MODE (op1
) != VOIDmode
1930 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1931 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
1932 xop0
= copy_to_mode_reg (mode0
, xop0
);
1934 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
1935 xop1
= copy_to_mode_reg (mode1
, xop1
);
1937 /* We could handle this, but we should always be called with a pseudo
1938 for our targets and all insns should take them as outputs. */
1939 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1940 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
1943 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
/* Direct attempt failed: roll back to the recorded position.  */
1950 delete_insns_since (last
);
1953 /* It can't be done in this mode. Can we do it in a wider mode? */
1955 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1957 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1958 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1960 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1961 != CODE_FOR_nothing
)
1963 rtx t0
= gen_reg_rtx (wider_mode
);
1964 rtx t1
= gen_reg_rtx (wider_mode
);
1965 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1966 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
/* Recurse in the wider mode, then narrow both results back.  */
1968 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
1971 convert_move (targ0
, t0
, unsignedp
);
1972 convert_move (targ1
, t1
, unsignedp
);
1976 delete_insns_since (last
);
/* Complete failure: discard everything emitted since entry.  */
1981 delete_insns_since (entry_last
);
1985 /* Expand the two-valued library call indicated by BINOPTAB, but
1986 preserve only one of the values. If TARG0 is non-NULL, the first
1987 value is placed into TARG0; otherwise the second value is placed
1988 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
1989 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
1990 This routine assumes that the value returned by the library call is
1991 as if the return value was of an integral mode twice as wide as the
1992 mode of OP0. Returns 1 if the call was successful. */
/* NOTE(review): interior source lines were dropped by the extraction
   (embedded original line numbers jump); code kept byte-identical.  */
1995 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
1996 rtx targ0
, rtx targ1
, enum rtx_code code
)
1998 enum machine_mode mode
;
1999 enum machine_mode libval_mode
;
2003 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2004 if (!((targ0
!= NULL_RTX
) ^ (targ1
!= NULL_RTX
)))
2007 mode
= GET_MODE (op0
);
/* No library function registered for this mode: fail.  */
2008 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2011 /* The value returned by the library function will have twice as
2012 many bits as the nominal MODE. */
2013 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2016 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2017 NULL_RTX
, LCT_CONST
,
2021 /* Get the part of VAL containing the value that we want:
   offset 0 for the first value (TARG0), one MODE-sized unit in
   for the second (TARG1). */
2022 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2023 targ0
? 0 : GET_MODE_SIZE (mode
));
2024 insns
= get_insns ();
2026 /* Move the result into the desired location. */
2027 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2028 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2034 /* Wrapper around expand_unop which takes an rtx code to specify
2035 the operation to perform, not an optab pointer. All other
2036 arguments are the same. */
2038 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2039 rtx target
, int unsignedp
)
/* Map the rtx code to its optab via the global table, then delegate.  */
2041 optab unop
= code_to_optab
[(int) code
];
2045 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
/* Try calculating (clz:narrow x) as
2051 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* NOTE(review): interior source lines were dropped by the extraction
   (embedded original line numbers jump); code kept byte-identical.  */
2053 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2055 enum mode_class
class = GET_MODE_CLASS (mode
);
2056 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2058 enum machine_mode wider_mode
;
/* Walk successively wider modes until one has a clz insn.  */
2059 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2060 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2062 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2063 != CODE_FOR_nothing
)
2065 rtx xop0
, temp
, last
;
2067 last
= get_last_insn ();
2070 target
= gen_reg_rtx (mode
);
/* Zero-extend the operand (unsigned widening, no_extend false).  */
2071 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2072 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
/* Compensate for the extra leading zero bits introduced by
   widening: subtract the difference in bit sizes.  */
2074 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2075 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2076 - GET_MODE_BITSIZE (mode
)),
2077 target
, true, OPTAB_DIRECT
);
/* Failed in this wider mode: roll back and try the next one.  */
2079 delete_insns_since (last
);
2088 /* Try calculating (parity x) as (and (popcount x) 1), where
2089 popcount can also be done in a wider mode. */
/* NOTE(review): interior source lines were dropped by the extraction
   (embedded original line numbers jump); code kept byte-identical.  */
2091 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2093 enum mode_class
class = GET_MODE_CLASS (mode
);
2094 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2096 enum machine_mode wider_mode
;
/* Unlike widen_clz, the search starts at MODE itself, since a
   same-mode popcount also works here.  */
2097 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2098 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2100 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2101 != CODE_FOR_nothing
)
2103 rtx xop0
, temp
, last
;
2105 last
= get_last_insn ();
2108 target
= gen_reg_rtx (mode
);
/* Zero-extend the operand into the popcount mode.  */
2109 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2110 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
/* The low bit of the population count is the parity.  */
2113 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2114 target
, true, OPTAB_DIRECT
);
/* Failed in this mode: roll back and try the next wider one.  */
2116 delete_insns_since (last
);
2125 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2126 conditions, VAL may already be a SUBREG against which we cannot generate
2127 a further SUBREG. In this case, we expect forcing the value into a
2128 register will work around the situation. */
2131 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2132 enum machine_mode imode
)
2135 ret
= lowpart_subreg (omode
, val
, imode
);
2138 val
= force_reg (imode
, val
);
2139 ret
= lowpart_subreg (omode
, val
, imode
);
2140 gcc_assert (ret
!= NULL
);
2145 /* Expand a floating point absolute value or negation operation via a
2146 logical operation on the sign bit. */
2149 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2150 rtx op0
, rtx target
)
2152 const struct real_format
*fmt
;
2153 int bitpos
, word
, nwords
, i
;
2154 enum machine_mode imode
;
2155 HOST_WIDE_INT hi
, lo
;
2158 /* The format has to have a simple sign bit. */
2159 fmt
= REAL_MODE_FORMAT (mode
);
2163 bitpos
= fmt
->signbit
;
2167 /* Don't create negative zeros if the format doesn't support them. */
2168 if (code
== NEG
&& !fmt
->has_signed_zero
)
2171 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2173 imode
= int_mode_for_mode (mode
);
2174 if (imode
== BLKmode
)
2183 if (FLOAT_WORDS_BIG_ENDIAN
)
2184 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2186 word
= bitpos
/ BITS_PER_WORD
;
2187 bitpos
= bitpos
% BITS_PER_WORD
;
2188 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2191 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2194 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2198 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2204 if (target
== 0 || target
== op0
)
2205 target
= gen_reg_rtx (mode
);
2211 for (i
= 0; i
< nwords
; ++i
)
2213 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2214 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2218 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2220 immed_double_const (lo
, hi
, imode
),
2221 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2222 if (temp
!= targ_piece
)
2223 emit_move_insn (targ_piece
, temp
);
2226 emit_move_insn (targ_piece
, op0_piece
);
2229 insns
= get_insns ();
2232 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2233 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2237 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2238 gen_lowpart (imode
, op0
),
2239 immed_double_const (lo
, hi
, imode
),
2240 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2241 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2243 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2244 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2250 /* Generate code to perform an operation specified by UNOPTAB
2251 on operand OP0, with result having machine-mode MODE.
2253 UNSIGNEDP is for the case where we have to widen the operands
2254 to perform the operation. It says to use zero-extension.
2256 If TARGET is nonzero, the value
2257 is generated there, if it is convenient to do so.
2258 In all cases an rtx is returned for the locus of the value;
2259 this may or may not be TARGET. */
2262 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2265 enum mode_class
class;
2266 enum machine_mode wider_mode
;
2268 rtx last
= get_last_insn ();
2271 class = GET_MODE_CLASS (mode
);
2274 op0
= force_not_mem (op0
);
2276 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2278 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2279 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2285 temp
= gen_reg_rtx (mode
);
2287 if (GET_MODE (xop0
) != VOIDmode
2288 && GET_MODE (xop0
) != mode0
)
2289 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2291 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2293 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2294 xop0
= copy_to_mode_reg (mode0
, xop0
);
2296 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2297 temp
= gen_reg_rtx (mode
);
2299 pat
= GEN_FCN (icode
) (temp
, xop0
);
2302 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2303 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2305 delete_insns_since (last
);
2306 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2314 delete_insns_since (last
);
2317 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2319 /* Widening clz needs special treatment. */
2320 if (unoptab
== clz_optab
)
2322 temp
= widen_clz (mode
, op0
, target
);
2329 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2330 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2331 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2333 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2337 /* For certain operations, we need not actually extend
2338 the narrow operand, as long as we will truncate the
2339 results to the same narrowness. */
2341 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2342 (unoptab
== neg_optab
2343 || unoptab
== one_cmpl_optab
)
2344 && class == MODE_INT
);
2346 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2351 if (class != MODE_INT
)
2354 target
= gen_reg_rtx (mode
);
2355 convert_move (target
, temp
, 0);
2359 return gen_lowpart (mode
, temp
);
2362 delete_insns_since (last
);
2366 /* These can be done a word at a time. */
2367 if (unoptab
== one_cmpl_optab
2368 && class == MODE_INT
2369 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2370 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2375 if (target
== 0 || target
== op0
)
2376 target
= gen_reg_rtx (mode
);
2380 /* Do the actual arithmetic. */
2381 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2383 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2384 rtx x
= expand_unop (word_mode
, unoptab
,
2385 operand_subword_force (op0
, i
, mode
),
2386 target_piece
, unsignedp
);
2388 if (target_piece
!= x
)
2389 emit_move_insn (target_piece
, x
);
2392 insns
= get_insns ();
2395 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2396 gen_rtx_fmt_e (unoptab
->code
, mode
,
2401 if (unoptab
->code
== NEG
)
2403 /* Try negating floating point values by flipping the sign bit. */
2404 if (class == MODE_FLOAT
)
2406 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2411 /* If there is no negation pattern, and we have no negative zero,
2412 try subtracting from zero. */
2413 if (!HONOR_SIGNED_ZEROS (mode
))
2415 temp
= expand_binop (mode
, (unoptab
== negv_optab
2416 ? subv_optab
: sub_optab
),
2417 CONST0_RTX (mode
), op0
, target
,
2418 unsignedp
, OPTAB_DIRECT
);
2424 /* Try calculating parity (x) as popcount (x) % 2. */
2425 if (unoptab
== parity_optab
)
2427 temp
= expand_parity (mode
, op0
, target
);
2433 /* Now try a library call in this mode. */
2434 if (unoptab
->handlers
[(int) mode
].libfunc
)
2438 enum machine_mode outmode
= mode
;
2440 /* All of these functions return small values. Thus we choose to
2441 have them return something that isn't a double-word. */
2442 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2443 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2445 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2449 /* Pass 1 for NO_QUEUE so we don't lose any increments
2450 if the libcall is cse'd or moved. */
2451 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2452 NULL_RTX
, LCT_CONST
, outmode
,
2454 insns
= get_insns ();
2457 target
= gen_reg_rtx (outmode
);
2458 emit_libcall_block (insns
, target
, value
,
2459 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2464 /* It can't be done in this mode. Can we do it in a wider mode? */
2466 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2468 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2469 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2471 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2472 != CODE_FOR_nothing
)
2473 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2477 /* For certain operations, we need not actually extend
2478 the narrow operand, as long as we will truncate the
2479 results to the same narrowness. */
2481 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2482 (unoptab
== neg_optab
2483 || unoptab
== one_cmpl_optab
)
2484 && class == MODE_INT
);
2486 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2489 /* If we are generating clz using wider mode, adjust the
2491 if (unoptab
== clz_optab
&& temp
!= 0)
2492 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2493 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2494 - GET_MODE_BITSIZE (mode
)),
2495 target
, true, OPTAB_DIRECT
);
2499 if (class != MODE_INT
)
2502 target
= gen_reg_rtx (mode
);
2503 convert_move (target
, temp
, 0);
2507 return gen_lowpart (mode
, temp
);
2510 delete_insns_since (last
);
2515 /* One final attempt at implementing negation via subtraction,
2516 this time allowing widening of the operand. */
2517 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2520 temp
= expand_binop (mode
,
2521 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2522 CONST0_RTX (mode
), op0
,
2523 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2531 /* Emit code to compute the absolute value of OP0, with result to
2532 TARGET if convenient. (TARGET may be 0.) The return value says
2533 where the result actually is to be found.
2535 MODE is the mode of the operand; the mode of the result is
2536 different but can be deduced from MODE.
2541 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2542 int result_unsignedp
)
2547 result_unsignedp
= 1;
2549 /* First try to do it with a special abs instruction. */
2550 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2555 /* For floating point modes, try clearing the sign bit. */
2556 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2558 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2563 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2564 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2565 && !HONOR_SIGNED_ZEROS (mode
))
2567 rtx last
= get_last_insn ();
2569 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2571 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2577 delete_insns_since (last
);
2580 /* If this machine has expensive jumps, we can do integer absolute
2581 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2582 where W is the width of MODE. */
2584 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2586 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2587 size_int (GET_MODE_BITSIZE (mode
) - 1),
2590 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2593 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2594 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2604 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2605 int result_unsignedp
, int safe
)
2610 result_unsignedp
= 1;
2612 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2616 /* If that does not win, use conditional jump and negate. */
2618 /* It is safe to use the target if it is the same
2619 as the source if this is also a pseudo register */
2620 if (op0
== target
&& REG_P (op0
)
2621 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2624 op1
= gen_label_rtx ();
2625 if (target
== 0 || ! safe
2626 || GET_MODE (target
) != mode
2627 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2629 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2630 target
= gen_reg_rtx (mode
);
2632 emit_move_insn (target
, op0
);
2635 /* If this mode is an integer too wide to compare properly,
2636 compare word by word. Rely on CSE to optimize constant cases. */
2637 if (GET_MODE_CLASS (mode
) == MODE_INT
2638 && ! can_compare_p (GE
, mode
, ccp_jump
))
2639 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2642 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2643 NULL_RTX
, NULL_RTX
, op1
);
2645 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2648 emit_move_insn (target
, op0
);
2654 /* A subroutine of expand_copysign, perform the copysign operation using the
2655 abs and neg primitives advertised to exist on the target. The assumption
2656 is that we have a split register file, and leaving op0 in fp registers,
2657 and not playing with subregs so much, will help the register allocator. */
2660 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2661 int bitpos
, bool op0_is_abs
)
2663 enum machine_mode imode
;
2664 HOST_WIDE_INT hi
, lo
;
2673 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2680 if (target
== NULL_RTX
)
2681 target
= copy_to_reg (op0
);
2683 emit_move_insn (target
, op0
);
2686 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2688 imode
= int_mode_for_mode (mode
);
2689 if (imode
== BLKmode
)
2691 op1
= gen_lowpart (imode
, op1
);
2696 if (FLOAT_WORDS_BIG_ENDIAN
)
2697 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2699 word
= bitpos
/ BITS_PER_WORD
;
2700 bitpos
= bitpos
% BITS_PER_WORD
;
2701 op1
= operand_subword_force (op1
, word
, mode
);
2704 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2707 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2711 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2715 op1
= expand_binop (imode
, and_optab
, op1
,
2716 immed_double_const (lo
, hi
, imode
),
2717 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2719 label
= gen_label_rtx ();
2720 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2722 if (GET_CODE (op0
) == CONST_DOUBLE
)
2723 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2725 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2727 emit_move_insn (target
, op0
);
2735 /* A subroutine of expand_copysign, perform the entire copysign operation
2736 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2737 is true if op0 is known to have its sign bit clear. */
2740 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2741 int bitpos
, bool op0_is_abs
)
2743 enum machine_mode imode
;
2744 HOST_WIDE_INT hi
, lo
;
2745 int word
, nwords
, i
;
2748 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2750 imode
= int_mode_for_mode (mode
);
2751 if (imode
== BLKmode
)
2760 if (FLOAT_WORDS_BIG_ENDIAN
)
2761 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2763 word
= bitpos
/ BITS_PER_WORD
;
2764 bitpos
= bitpos
% BITS_PER_WORD
;
2765 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2768 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2771 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2775 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2779 if (target
== 0 || target
== op0
|| target
== op1
)
2780 target
= gen_reg_rtx (mode
);
2786 for (i
= 0; i
< nwords
; ++i
)
2788 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2789 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2794 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2795 immed_double_const (~lo
, ~hi
, imode
),
2796 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2798 op1
= expand_binop (imode
, and_optab
,
2799 operand_subword_force (op1
, i
, mode
),
2800 immed_double_const (lo
, hi
, imode
),
2801 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2803 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2804 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2805 if (temp
!= targ_piece
)
2806 emit_move_insn (targ_piece
, temp
);
2809 emit_move_insn (targ_piece
, op0_piece
);
2812 insns
= get_insns ();
2815 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2819 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2820 immed_double_const (lo
, hi
, imode
),
2821 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2823 op0
= gen_lowpart (imode
, op0
);
2825 op0
= expand_binop (imode
, and_optab
, op0
,
2826 immed_double_const (~lo
, ~hi
, imode
),
2827 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2829 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2830 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2831 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2837 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2838 scalar floating point mode. Return NULL if we do not know how to
2839 expand the operation inline. */
2842 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2844 enum machine_mode mode
= GET_MODE (op0
);
2845 const struct real_format
*fmt
;
2850 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2851 gcc_assert (GET_MODE (op1
) == mode
);
2853 /* First try to do it with a special instruction. */
2854 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2855 target
, 0, OPTAB_DIRECT
);
2859 fmt
= REAL_MODE_FORMAT (mode
);
2860 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2863 bitpos
= fmt
->signbit
;
2868 if (GET_CODE (op0
) == CONST_DOUBLE
)
2870 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2871 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2875 if (GET_CODE (op0
) == CONST_DOUBLE
2876 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2877 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
))
2879 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2880 bitpos
, op0_is_abs
);
2885 return expand_copysign_bit (mode
, op0
, op1
, target
, bitpos
, op0_is_abs
);
2888 /* Generate an instruction whose insn-code is INSN_CODE,
2889 with two operands: an output TARGET and an input OP0.
2890 TARGET *must* be nonzero, and the output is always stored there.
2891 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2892 the value that is stored into TARGET. */
2895 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2898 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2903 /* Sign and zero extension from memory is often done specially on
2904 RISC machines, so forcing into a register here can pessimize
2906 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2907 op0
= force_not_mem (op0
);
2909 /* Now, if insn does not accept our operands, put them into pseudos. */
2911 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2912 op0
= copy_to_mode_reg (mode0
, op0
);
2914 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2915 || (flag_force_mem
&& MEM_P (temp
)))
2916 temp
= gen_reg_rtx (GET_MODE (temp
));
2918 pat
= GEN_FCN (icode
) (temp
, op0
);
2920 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2921 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2926 emit_move_insn (target
, temp
);
2929 /* Emit code to perform a series of operations on a multi-word quantity, one
2932 Such a block is preceded by a CLOBBER of the output, consists of multiple
2933 insns, each setting one word of the output, and followed by a SET copying
2934 the output to itself.
2936 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2937 note indicating that it doesn't conflict with the (also multi-word)
2938 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2941 INSNS is a block of code generated to perform the operation, not including
2942 the CLOBBER and final copy. All insns that compute intermediate values
2943 are first emitted, followed by the block as described above.
2945 TARGET, OP0, and OP1 are the output and inputs of the operations,
2946 respectively. OP1 may be zero for a unary operation.
2948 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2951 If TARGET is not a register, INSNS is simply emitted with no special
2952 processing. Likewise if anything in INSNS is not an INSN or if
2953 there is a libcall block inside INSNS.
2955 The final insn emitted is returned. */
2958 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2960 rtx prev
, next
, first
, last
, insn
;
2962 if (!REG_P (target
) || reload_in_progress
)
2963 return emit_insn (insns
);
2965 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2966 if (!NONJUMP_INSN_P (insn
)
2967 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2968 return emit_insn (insns
);
2970 /* First emit all insns that do not store into words of the output and remove
2971 these from the list. */
2972 for (insn
= insns
; insn
; insn
= next
)
2977 next
= NEXT_INSN (insn
);
2979 /* Some ports (cris) create a libcall regions at their own. We must
2980 avoid any potential nesting of LIBCALLs. */
2981 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2982 remove_note (insn
, note
);
2983 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2984 remove_note (insn
, note
);
2986 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2987 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2988 set
= PATTERN (insn
);
2989 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2991 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2992 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2994 set
= XVECEXP (PATTERN (insn
), 0, i
);
3002 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3004 if (PREV_INSN (insn
))
3005 NEXT_INSN (PREV_INSN (insn
)) = next
;
3010 PREV_INSN (next
) = PREV_INSN (insn
);
3016 prev
= get_last_insn ();
3018 /* Now write the CLOBBER of the output, followed by the setting of each
3019 of the words, followed by the final copy. */
3020 if (target
!= op0
&& target
!= op1
)
3021 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3023 for (insn
= insns
; insn
; insn
= next
)
3025 next
= NEXT_INSN (insn
);
3028 if (op1
&& REG_P (op1
))
3029 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3032 if (op0
&& REG_P (op0
))
3033 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3037 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3038 != CODE_FOR_nothing
)
3040 last
= emit_move_insn (target
, target
);
3042 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3046 last
= get_last_insn ();
3048 /* Remove any existing REG_EQUAL note from "last", or else it will
3049 be mistaken for a note referring to the full contents of the
3050 alleged libcall value when found together with the REG_RETVAL
3051 note added below. An existing note can come from an insn
3052 expansion at "last". */
3053 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3057 first
= get_insns ();
3059 first
= NEXT_INSN (prev
);
3061 /* Encapsulate the block so it gets manipulated as a unit. */
3062 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3064 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3069 /* Emit code to make a call to a constant function or a library call.
3071 INSNS is a list containing all insns emitted in the call.
3072 These insns leave the result in RESULT. Our block is to copy RESULT
3073 to TARGET, which is logically equivalent to EQUIV.
3075 We first emit any insns that set a pseudo on the assumption that these are
3076 loading constants into registers; doing so allows them to be safely cse'ed
3077 between blocks. Then we emit all the other insns in the block, followed by
3078 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3079 note with an operand of EQUIV.
3081 Moving assignments to pseudos outside of the block is done to improve
3082 the generated code, but is not required to generate correct code,
3083 hence being unable to move an assignment is not grounds for not making
3084 a libcall block. There are two reasons why it is safe to leave these
3085 insns inside the block: First, we know that these pseudos cannot be
3086 used in generated RTL outside the block since they are created for
3087 temporary purposes within the block. Second, CSE will not record the
3088 values of anything set inside a libcall block, so we know they must
3089 be dead at the end of the block.
3091 Except for the first group of insns (the ones setting pseudos), the
3092 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3095 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3097 rtx final_dest
= target
;
3098 rtx prev
, next
, first
, last
, insn
;
3100 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3101 into a MEM later. Protect the libcall block from this change. */
3102 if (! REG_P (target
) || REG_USERVAR_P (target
))
3103 target
= gen_reg_rtx (GET_MODE (target
));
3105 /* If we're using non-call exceptions, a libcall corresponding to an
3106 operation that may trap may also trap. */
3107 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3109 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3112 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3114 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3115 remove_note (insn
, note
);
3119 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3120 reg note to indicate that this call cannot throw or execute a nonlocal
3121 goto (unless there is already a REG_EH_REGION note, in which case
3123 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3126 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3129 XEXP (note
, 0) = constm1_rtx
;
3131 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3135 /* First emit all insns that set pseudos. Remove them from the list as
3136 we go. Avoid insns that set pseudos which were referenced in previous
3137 insns. These can be generated by move_by_pieces, for example,
3138 to update an address. Similarly, avoid insns that reference things
3139 set in previous insns. */
3141 for (insn
= insns
; insn
; insn
= next
)
3143 rtx set
= single_set (insn
);
3146 /* Some ports (cris) create a libcall regions at their own. We must
3147 avoid any potential nesting of LIBCALLs. */
3148 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3149 remove_note (insn
, note
);
3150 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3151 remove_note (insn
, note
);
3153 next
= NEXT_INSN (insn
);
3155 if (set
!= 0 && REG_P (SET_DEST (set
))
3156 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3158 || ((! INSN_P(insns
)
3159 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3160 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3161 && ! modified_in_p (SET_SRC (set
), insns
)
3162 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3164 if (PREV_INSN (insn
))
3165 NEXT_INSN (PREV_INSN (insn
)) = next
;
3170 PREV_INSN (next
) = PREV_INSN (insn
);
3175 /* Some ports use a loop to copy large arguments onto the stack.
3176 Don't move anything outside such a loop. */
3181 prev
= get_last_insn ();
3183 /* Write the remaining insns followed by the final copy. */
3185 for (insn
= insns
; insn
; insn
= next
)
3187 next
= NEXT_INSN (insn
);
3192 last
= emit_move_insn (target
, result
);
3193 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3194 != CODE_FOR_nothing
)
3195 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3198 /* Remove any existing REG_EQUAL note from "last", or else it will
3199 be mistaken for a note referring to the full contents of the
3200 libcall value when found together with the REG_RETVAL note added
3201 below. An existing note can come from an insn expansion at
3203 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3206 if (final_dest
!= target
)
3207 emit_move_insn (final_dest
, target
);
3210 first
= get_insns ();
3212 first
= NEXT_INSN (prev
);
3214 /* Encapsulate the block so it gets manipulated as a unit. */
3215 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3217 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3218 when the encapsulated region would not be in one basic block,
3219 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3221 bool attach_libcall_retval_notes
= true;
3222 next
= NEXT_INSN (last
);
3223 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3224 if (control_flow_insn_p (insn
))
3226 attach_libcall_retval_notes
= false;
3230 if (attach_libcall_retval_notes
)
3232 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3234 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3240 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3241 PURPOSE describes how this comparison will be used. CODE is the rtx
3242 comparison code we will be using.
3244 ??? Actually, CODE is slightly weaker than that. A target is still
3245 required to implement all of the normal bcc operations, but not
3246 required to implement all (or any) of the unordered bcc operations. */
3249 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3250 enum can_compare_purpose purpose
)
3254 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3256 if (purpose
== ccp_jump
)
3257 return bcc_gen_fctn
[(int) code
] != NULL
;
3258 else if (purpose
== ccp_store_flag
)
3259 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3261 /* There's only one cmov entry point, and it's allowed to fail. */
3264 if (purpose
== ccp_jump
3265 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3267 if (purpose
== ccp_cmov
3268 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3270 if (purpose
== ccp_store_flag
3271 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3273 mode
= GET_MODE_WIDER_MODE (mode
);
3275 while (mode
!= VOIDmode
);
3280 /* This function is called when we are going to emit a compare instruction that
3281 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3283 *PMODE is the mode of the inputs (in case they are const_int).
3284 *PUNSIGNEDP nonzero says that the operands are unsigned;
3285 this matters if they need to be widened.
3287 If they have mode BLKmode, then SIZE specifies the size of both operands.
3289 This function performs all the setup necessary so that the caller only has
3290 to emit a single comparison insn. This setup can involve doing a BLKmode
3291 comparison or emitting a library call to perform the comparison if no insn
3292 is available to handle it.
3293 The values which are passed in through pointers can be modified; the caller
3294 should perform the comparison on the modified values. */
3297 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3298 enum machine_mode
*pmode
, int *punsignedp
,
3299 enum can_compare_purpose purpose
)
3301 enum machine_mode mode
= *pmode
;
3302 rtx x
= *px
, y
= *py
;
3303 int unsignedp
= *punsignedp
;
3304 enum mode_class
class;
3306 class = GET_MODE_CLASS (mode
);
3308 /* They could both be VOIDmode if both args are immediate constants,
3309 but we should fold that at an earlier stage.
3310 With no special code here, this will call abort,
3311 reminding the programmer to implement such folding. */
3313 if (mode
!= BLKmode
&& flag_force_mem
)
3315 /* Load duplicate non-volatile operands once. */
3316 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3318 x
= force_not_mem (x
);
3323 x
= force_not_mem (x
);
3324 y
= force_not_mem (y
);
3328 /* If we are inside an appropriately-short loop and we are optimizing,
3329 force expensive constants into a register. */
3330 if (CONSTANT_P (x
) && optimize
3331 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3332 x
= force_reg (mode
, x
);
3334 if (CONSTANT_P (y
) && optimize
3335 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3336 y
= force_reg (mode
, y
);
3339 /* Abort if we have a non-canonical comparison. The RTL documentation
3340 states that canonical comparisons are required only for targets which
3342 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3346 /* Don't let both operands fail to indicate the mode. */
3347 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3348 x
= force_reg (mode
, x
);
3350 /* Handle all BLKmode compares. */
3352 if (mode
== BLKmode
)
3354 enum machine_mode cmp_mode
, result_mode
;
3355 enum insn_code cmp_code
;
3360 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3365 /* Try to use a memory block compare insn - either cmpstr
3366 or cmpmem will do. */
3367 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3368 cmp_mode
!= VOIDmode
;
3369 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3371 cmp_code
= cmpmem_optab
[cmp_mode
];
3372 if (cmp_code
== CODE_FOR_nothing
)
3373 cmp_code
= cmpstr_optab
[cmp_mode
];
3374 if (cmp_code
== CODE_FOR_nothing
)
3377 /* Must make sure the size fits the insn's mode. */
3378 if ((GET_CODE (size
) == CONST_INT
3379 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3380 || (GET_MODE_BITSIZE (GET_MODE (size
))
3381 > GET_MODE_BITSIZE (cmp_mode
)))
3384 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3385 result
= gen_reg_rtx (result_mode
);
3386 size
= convert_to_mode (cmp_mode
, size
, 1);
3387 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3391 *pmode
= result_mode
;
3395 /* Otherwise call a library function, memcmp. */
3396 libfunc
= memcmp_libfunc
;
3397 length_type
= sizetype
;
3398 result_mode
= TYPE_MODE (integer_type_node
);
3399 cmp_mode
= TYPE_MODE (length_type
);
3400 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3401 TYPE_UNSIGNED (length_type
));
3403 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3410 *pmode
= result_mode
;
3414 /* Don't allow operands to the compare to trap, as that can put the
3415 compare and branch in different basic blocks. */
3416 if (flag_non_call_exceptions
)
3419 x
= force_reg (mode
, x
);
3421 y
= force_reg (mode
, y
);
3426 if (can_compare_p (*pcomparison
, mode
, purpose
))
3429 /* Handle a lib call just for the mode we are using. */
3431 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3433 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3436 /* If we want unsigned, and this mode has a distinct unsigned
3437 comparison routine, use that. */
3438 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3439 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3441 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3442 word_mode
, 2, x
, mode
, y
, mode
);
3446 if (TARGET_LIB_INT_CMP_BIASED
)
3447 /* Integer comparison returns a result that must be compared
3448 against 1, so that even if we do an unsigned compare
3449 afterward, there is still a value that can represent the
3450 result "less than". */
3460 if (class == MODE_FLOAT
)
3461 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3467 /* Before emitting an insn with code ICODE, make sure that X, which is going
3468 to be used for operand OPNUM of the insn, is converted from mode MODE to
3469 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3470 that it is accepted by the operand predicate. Return the new value. */
3473 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3474 enum machine_mode wider_mode
, int unsignedp
)
3476 if (mode
!= wider_mode
)
3477 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3479 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3480 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3484 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3490 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3491 we can do the comparison.
3492 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3493 be NULL_RTX which indicates that only a comparison is to be generated. */
3496 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3497 enum rtx_code comparison
, int unsignedp
, rtx label
)
3499 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3500 enum mode_class
class = GET_MODE_CLASS (mode
);
3501 enum machine_mode wider_mode
= mode
;
3503 /* Try combined insns first. */
3506 enum insn_code icode
;
3507 PUT_MODE (test
, wider_mode
);
3511 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3513 if (icode
!= CODE_FOR_nothing
3514 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3516 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3517 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3518 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3523 /* Handle some compares against zero. */
3524 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3525 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3527 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3528 emit_insn (GEN_FCN (icode
) (x
));
3530 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3534 /* Handle compares for which there is a directly suitable insn. */
3536 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3537 if (icode
!= CODE_FOR_nothing
)
3539 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3540 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3541 emit_insn (GEN_FCN (icode
) (x
, y
));
3543 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3547 if (class != MODE_INT
&& class != MODE_FLOAT
3548 && class != MODE_COMPLEX_FLOAT
)
3551 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3553 while (wider_mode
!= VOIDmode
);
3558 /* Generate code to compare X with Y so that the condition codes are
3559 set and to jump to LABEL if the condition is true. If X is a
3560 constant and Y is not a constant, then the comparison is swapped to
3561 ensure that the comparison RTL has the canonical form.
3563 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3564 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3565 the proper branch condition code.
3567 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3569 MODE is the mode of the inputs (in case they are const_int).
3571 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3572 be passed unchanged to emit_cmp_insn, then potentially converted into an
3573 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3576 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3577 enum machine_mode mode
, int unsignedp
, rtx label
)
3579 rtx op0
= x
, op1
= y
;
3581 /* Swap operands and condition to ensure canonical RTL. */
3582 if (swap_commutative_operands_p (x
, y
))
3584 /* If we're not emitting a branch, this means some caller
3590 comparison
= swap_condition (comparison
);
3594 /* If OP0 is still a constant, then both X and Y must be constants. Force
3595 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3597 if (CONSTANT_P (op0
))
3598 op0
= force_reg (mode
, op0
);
3602 comparison
= unsigned_condition (comparison
);
3604 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3606 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3609 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3612 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3613 enum machine_mode mode
, int unsignedp
)
3615 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3618 /* Emit a library call comparison between floating point X and Y.
3619 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3622 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3623 enum machine_mode
*pmode
, int *punsignedp
)
3625 enum rtx_code comparison
= *pcomparison
;
3626 enum rtx_code swapped
= swap_condition (comparison
);
3627 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3630 enum machine_mode orig_mode
= GET_MODE (x
);
3631 enum machine_mode mode
;
3632 rtx value
, target
, insns
, equiv
;
3634 bool reversed_p
= false;
3636 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3638 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3641 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3644 tmp
= x
; x
= y
; y
= tmp
;
3645 comparison
= swapped
;
3649 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3650 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3652 comparison
= reversed
;
3658 if (mode
== VOIDmode
)
3661 if (mode
!= orig_mode
)
3663 x
= convert_to_mode (mode
, x
, 0);
3664 y
= convert_to_mode (mode
, y
, 0);
3667 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3668 the RTL. The allows the RTL optimizers to delete the libcall if the
3669 condition can be determined at compile-time. */
3670 if (comparison
== UNORDERED
)
3672 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3673 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3674 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3675 temp
, const_true_rtx
, equiv
);
3679 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3680 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3682 rtx true_rtx
, false_rtx
;
3687 true_rtx
= const0_rtx
;
3688 false_rtx
= const_true_rtx
;
3692 true_rtx
= const_true_rtx
;
3693 false_rtx
= const0_rtx
;
3697 true_rtx
= const1_rtx
;
3698 false_rtx
= const0_rtx
;
3702 true_rtx
= const0_rtx
;
3703 false_rtx
= constm1_rtx
;
3707 true_rtx
= constm1_rtx
;
3708 false_rtx
= const0_rtx
;
3712 true_rtx
= const0_rtx
;
3713 false_rtx
= const1_rtx
;
3719 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3720 equiv
, true_rtx
, false_rtx
);
3725 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3726 word_mode
, 2, x
, mode
, y
, mode
);
3727 insns
= get_insns ();
3730 target
= gen_reg_rtx (word_mode
);
3731 emit_libcall_block (insns
, target
, value
, equiv
);
3733 if (comparison
== UNORDERED
3734 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3735 comparison
= reversed_p
? EQ
: NE
;
3740 *pcomparison
= comparison
;
3744 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3747 emit_indirect_jump (rtx loc
)
3749 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3751 loc
= copy_to_mode_reg (Pmode
, loc
);
3753 emit_jump_insn (gen_indirect_jump (loc
));
3757 #ifdef HAVE_conditional_move
3759 /* Emit a conditional move instruction if the machine supports one for that
3760 condition and machine mode.
3762 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3763 the mode to use should they be constants. If it is VOIDmode, they cannot
3766 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3767 should be stored there. MODE is the mode to use should they be constants.
3768 If it is VOIDmode, they cannot both be constants.
3770 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3771 is not supported. */
3774 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3775 enum machine_mode cmode
, rtx op2
, rtx op3
,
3776 enum machine_mode mode
, int unsignedp
)
3778 rtx tem
, subtarget
, comparison
, insn
;
3779 enum insn_code icode
;
3780 enum rtx_code reversed
;
3782 /* If one operand is constant, make it the second one. Only do this
3783 if the other operand is not constant as well. */
3785 if (swap_commutative_operands_p (op0
, op1
))
3790 code
= swap_condition (code
);
3793 /* get_condition will prefer to generate LT and GT even if the old
3794 comparison was against zero, so undo that canonicalization here since
3795 comparisons against zero are cheaper. */
3796 if (code
== LT
&& op1
== const1_rtx
)
3797 code
= LE
, op1
= const0_rtx
;
3798 else if (code
== GT
&& op1
== constm1_rtx
)
3799 code
= GE
, op1
= const0_rtx
;
3801 if (cmode
== VOIDmode
)
3802 cmode
= GET_MODE (op0
);
3804 if (swap_commutative_operands_p (op2
, op3
)
3805 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3814 if (mode
== VOIDmode
)
3815 mode
= GET_MODE (op2
);
3817 icode
= movcc_gen_code
[mode
];
3819 if (icode
== CODE_FOR_nothing
)
3824 op2
= force_not_mem (op2
);
3825 op3
= force_not_mem (op3
);
3829 target
= gen_reg_rtx (mode
);
3833 /* If the insn doesn't accept these operands, put them in pseudos. */
3835 if (! (*insn_data
[icode
].operand
[0].predicate
)
3836 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3837 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3839 if (! (*insn_data
[icode
].operand
[2].predicate
)
3840 (op2
, insn_data
[icode
].operand
[2].mode
))
3841 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3843 if (! (*insn_data
[icode
].operand
[3].predicate
)
3844 (op3
, insn_data
[icode
].operand
[3].mode
))
3845 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3847 /* Everything should now be in the suitable form, so emit the compare insn
3848 and then the conditional move. */
3851 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3853 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3854 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3855 return NULL and let the caller figure out how best to deal with this
3857 if (GET_CODE (comparison
) != code
)
3860 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3862 /* If that failed, then give up. */
3868 if (subtarget
!= target
)
3869 convert_move (target
, subtarget
, 0);
3874 /* Return nonzero if a conditional move of mode MODE is supported.
3876 This function is for combine so it can tell whether an insn that looks
3877 like a conditional move is actually supported by the hardware. If we
3878 guess wrong we lose a bit on optimization, but that's it. */
3879 /* ??? sparc64 supports conditionally moving integers values based on fp
3880 comparisons, and vice versa. How do we handle them? */
3883 can_conditionally_move_p (enum machine_mode mode
)
3885 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3891 #endif /* HAVE_conditional_move */
3893 /* Emit a conditional addition instruction if the machine supports one for that
3894 condition and machine mode.
3896 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3897 the mode to use should they be constants. If it is VOIDmode, they cannot
3900 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3901 should be stored there. MODE is the mode to use should they be constants.
3902 If it is VOIDmode, they cannot both be constants.
3904 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3905 is not supported. */
3908 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3909 enum machine_mode cmode
, rtx op2
, rtx op3
,
3910 enum machine_mode mode
, int unsignedp
)
3912 rtx tem
, subtarget
, comparison
, insn
;
3913 enum insn_code icode
;
3914 enum rtx_code reversed
;
3916 /* If one operand is constant, make it the second one. Only do this
3917 if the other operand is not constant as well. */
3919 if (swap_commutative_operands_p (op0
, op1
))
3924 code
= swap_condition (code
);
3927 /* get_condition will prefer to generate LT and GT even if the old
3928 comparison was against zero, so undo that canonicalization here since
3929 comparisons against zero are cheaper. */
3930 if (code
== LT
&& op1
== const1_rtx
)
3931 code
= LE
, op1
= const0_rtx
;
3932 else if (code
== GT
&& op1
== constm1_rtx
)
3933 code
= GE
, op1
= const0_rtx
;
3935 if (cmode
== VOIDmode
)
3936 cmode
= GET_MODE (op0
);
3938 if (swap_commutative_operands_p (op2
, op3
)
3939 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3948 if (mode
== VOIDmode
)
3949 mode
= GET_MODE (op2
);
3951 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3953 if (icode
== CODE_FOR_nothing
)
3958 op2
= force_not_mem (op2
);
3959 op3
= force_not_mem (op3
);
3963 target
= gen_reg_rtx (mode
);
3965 /* If the insn doesn't accept these operands, put them in pseudos. */
3967 if (! (*insn_data
[icode
].operand
[0].predicate
)
3968 (target
, insn_data
[icode
].operand
[0].mode
))
3969 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3973 if (! (*insn_data
[icode
].operand
[2].predicate
)
3974 (op2
, insn_data
[icode
].operand
[2].mode
))
3975 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3977 if (! (*insn_data
[icode
].operand
[3].predicate
)
3978 (op3
, insn_data
[icode
].operand
[3].mode
))
3979 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3981 /* Everything should now be in the suitable form, so emit the compare insn
3982 and then the conditional move. */
3985 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3987 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3988 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3989 return NULL and let the caller figure out how best to deal with this
3991 if (GET_CODE (comparison
) != code
)
3994 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3996 /* If that failed, then give up. */
4002 if (subtarget
!= target
)
4003 convert_move (target
, subtarget
, 0);
4008 /* These functions attempt to generate an insn body, rather than
4009 emitting the insn, but if the gen function already emits them, we
4010 make no attempt to turn them back into naked patterns. */
4012 /* Generate and return an insn body to add Y to X. */
4015 gen_add2_insn (rtx x
, rtx y
)
4017 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4019 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4020 (x
, insn_data
[icode
].operand
[0].mode
))
4021 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4022 (x
, insn_data
[icode
].operand
[1].mode
))
4023 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4024 (y
, insn_data
[icode
].operand
[2].mode
)))
4027 return (GEN_FCN (icode
) (x
, x
, y
));
4030 /* Generate and return an insn body to add r1 and c,
4031 storing the result in r0. */
4033 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4035 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4037 if (icode
== CODE_FOR_nothing
4038 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4039 (r0
, insn_data
[icode
].operand
[0].mode
))
4040 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4041 (r1
, insn_data
[icode
].operand
[1].mode
))
4042 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4043 (c
, insn_data
[icode
].operand
[2].mode
)))
4046 return (GEN_FCN (icode
) (r0
, r1
, c
));
4050 have_add2_insn (rtx x
, rtx y
)
4054 if (GET_MODE (x
) == VOIDmode
)
4057 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4059 if (icode
== CODE_FOR_nothing
)
4062 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4063 (x
, insn_data
[icode
].operand
[0].mode
))
4064 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4065 (x
, insn_data
[icode
].operand
[1].mode
))
4066 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4067 (y
, insn_data
[icode
].operand
[2].mode
)))
4073 /* Generate and return an insn body to subtract Y from X. */
4076 gen_sub2_insn (rtx x
, rtx y
)
4078 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4080 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4081 (x
, insn_data
[icode
].operand
[0].mode
))
4082 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4083 (x
, insn_data
[icode
].operand
[1].mode
))
4084 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4085 (y
, insn_data
[icode
].operand
[2].mode
)))
4088 return (GEN_FCN (icode
) (x
, x
, y
));
4091 /* Generate and return an insn body to subtract r1 and c,
4092 storing the result in r0. */
4094 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4096 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4098 if (icode
== CODE_FOR_nothing
4099 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4100 (r0
, insn_data
[icode
].operand
[0].mode
))
4101 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4102 (r1
, insn_data
[icode
].operand
[1].mode
))
4103 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4104 (c
, insn_data
[icode
].operand
[2].mode
)))
4107 return (GEN_FCN (icode
) (r0
, r1
, c
));
4111 have_sub2_insn (rtx x
, rtx y
)
4115 if (GET_MODE (x
) == VOIDmode
)
4118 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4120 if (icode
== CODE_FOR_nothing
)
4123 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4124 (x
, insn_data
[icode
].operand
[0].mode
))
4125 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4126 (x
, insn_data
[icode
].operand
[1].mode
))
4127 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4128 (y
, insn_data
[icode
].operand
[2].mode
)))
4134 /* Generate the body of an instruction to copy Y into X.
4135 It may be a list of insns, if one insn isn't enough. */
4138 gen_move_insn (rtx x
, rtx y
)
4143 emit_move_insn_1 (x
, y
);
4149 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4150 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4151 no such operation exists, CODE_FOR_nothing will be returned. */
4154 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4158 #ifdef HAVE_ptr_extend
4160 return CODE_FOR_ptr_extend
;
4163 tab
= unsignedp
? zext_optab
: sext_optab
;
4164 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4167 /* Generate the body of an insn to extend Y (with mode MFROM)
4168 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4171 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4172 enum machine_mode mfrom
, int unsignedp
)
4174 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4175 return GEN_FCN (icode
) (x
, y
);
4178 /* can_fix_p and can_float_p say whether the target machine
4179 can directly convert a given fixed point type to
4180 a given floating point type, or vice versa.
4181 The returned value is the CODE_FOR_... value to use,
4182 or CODE_FOR_nothing if these modes cannot be directly converted.
4184 *TRUNCP_PTR is set to 1 if it is necessary to output
4185 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4187 static enum insn_code
4188 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4189 int unsignedp
, int *truncp_ptr
)
4192 enum insn_code icode
;
4194 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4195 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4196 if (icode
!= CODE_FOR_nothing
)
4202 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4203 for this to work. We need to rework the fix* and ftrunc* patterns
4204 and documentation. */
4205 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4206 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4207 if (icode
!= CODE_FOR_nothing
4208 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4215 return CODE_FOR_nothing
;
4218 static enum insn_code
4219 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4224 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4225 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4228 /* Generate code to convert FROM to floating point
4229 and store in TO. FROM must be fixed point and not VOIDmode.
4230 UNSIGNEDP nonzero means regard FROM as unsigned.
4231 Normally this is done by correcting the final value
4232 if it is negative. */
4235 expand_float (rtx to
, rtx from
, int unsignedp
)
4237 enum insn_code icode
;
4239 enum machine_mode fmode
, imode
;
4241 /* Crash now, because we won't be able to decide which mode to use. */
4242 if (GET_MODE (from
) == VOIDmode
)
4245 /* Look for an insn to do the conversion. Do it in the specified
4246 modes if possible; otherwise convert either input, output or both to
4247 wider mode. If the integer mode is wider than the mode of FROM,
4248 we can do the conversion signed even if the input is unsigned. */
4250 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4251 fmode
= GET_MODE_WIDER_MODE (fmode
))
4252 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4253 imode
= GET_MODE_WIDER_MODE (imode
))
4255 int doing_unsigned
= unsignedp
;
4257 if (fmode
!= GET_MODE (to
)
4258 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4261 icode
= can_float_p (fmode
, imode
, unsignedp
);
4262 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4263 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4265 if (icode
!= CODE_FOR_nothing
)
4267 if (imode
!= GET_MODE (from
))
4268 from
= convert_to_mode (imode
, from
, unsignedp
);
4270 if (fmode
!= GET_MODE (to
))
4271 target
= gen_reg_rtx (fmode
);
4273 emit_unop_insn (icode
, target
, from
,
4274 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4277 convert_move (to
, target
, 0);
4282 /* Unsigned integer, and no way to convert directly.
4283 Convert as signed, then conditionally adjust the result. */
4286 rtx label
= gen_label_rtx ();
4288 REAL_VALUE_TYPE offset
;
4291 from
= force_not_mem (from
);
4293 /* Look for a usable floating mode FMODE wider than the source and at
4294 least as wide as the target. Using FMODE will avoid rounding woes
4295 with unsigned values greater than the signed maximum value. */
4297 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4298 fmode
= GET_MODE_WIDER_MODE (fmode
))
4299 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4300 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4303 if (fmode
== VOIDmode
)
4305 /* There is no such mode. Pretend the target is wide enough. */
4306 fmode
= GET_MODE (to
);
4308 /* Avoid double-rounding when TO is narrower than FROM. */
4309 if ((significand_size (fmode
) + 1)
4310 < GET_MODE_BITSIZE (GET_MODE (from
)))
4313 rtx neglabel
= gen_label_rtx ();
4315 /* Don't use TARGET if it isn't a register, is a hard register,
4316 or is the wrong mode. */
4318 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4319 || GET_MODE (target
) != fmode
)
4320 target
= gen_reg_rtx (fmode
);
4322 imode
= GET_MODE (from
);
4323 do_pending_stack_adjust ();
4325 /* Test whether the sign bit is set. */
4326 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4329 /* The sign bit is not set. Convert as signed. */
4330 expand_float (target
, from
, 0);
4331 emit_jump_insn (gen_jump (label
));
4334 /* The sign bit is set.
4335 Convert to a usable (positive signed) value by shifting right
4336 one bit, while remembering if a nonzero bit was shifted
4337 out; i.e., compute (from & 1) | (from >> 1). */
4339 emit_label (neglabel
);
4340 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4341 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4342 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4344 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4346 expand_float (target
, temp
, 0);
4348 /* Multiply by 2 to undo the shift above. */
4349 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4350 target
, 0, OPTAB_LIB_WIDEN
);
4352 emit_move_insn (target
, temp
);
4354 do_pending_stack_adjust ();
4360 /* If we are about to do some arithmetic to correct for an
4361 unsigned operand, do it in a pseudo-register. */
4363 if (GET_MODE (to
) != fmode
4364 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4365 target
= gen_reg_rtx (fmode
);
4367 /* Convert as signed integer to floating. */
4368 expand_float (target
, from
, 0);
4370 /* If FROM is negative (and therefore TO is negative),
4371 correct its value by 2**bitwidth. */
4373 do_pending_stack_adjust ();
4374 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4378 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4379 temp
= expand_binop (fmode
, add_optab
, target
,
4380 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4381 target
, 0, OPTAB_LIB_WIDEN
);
4383 emit_move_insn (target
, temp
);
4385 do_pending_stack_adjust ();
4390 /* No hardware instruction available; call a library routine. */
4395 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4397 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4398 from
= convert_to_mode (SImode
, from
, unsignedp
);
4401 from
= force_not_mem (from
);
4403 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4409 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4410 GET_MODE (to
), 1, from
,
4412 insns
= get_insns ();
4415 emit_libcall_block (insns
, target
, value
,
4416 gen_rtx_FLOAT (GET_MODE (to
), from
));
4421 /* Copy result to requested destination
4422 if we have been computing in a temp location. */
4426 if (GET_MODE (target
) == GET_MODE (to
))
4427 emit_move_insn (to
, target
);
4429 convert_move (to
, target
, 0);
4433 /* Generate code to convert FROM to fixed point and store in TO. FROM
4434 must be floating point. */
4437 expand_fix (rtx to
, rtx from
, int unsignedp
)
4439 enum insn_code icode
;
4441 enum machine_mode fmode
, imode
;
4444 /* We first try to find a pair of modes, one real and one integer, at
4445 least as wide as FROM and TO, respectively, in which we can open-code
4446 this conversion. If the integer mode is wider than the mode of TO,
4447 we can do the conversion either signed or unsigned. */
4449 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4450 fmode
= GET_MODE_WIDER_MODE (fmode
))
4451 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4452 imode
= GET_MODE_WIDER_MODE (imode
))
4454 int doing_unsigned
= unsignedp
;
4456 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4457 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4458 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4460 if (icode
!= CODE_FOR_nothing
)
4462 if (fmode
!= GET_MODE (from
))
4463 from
= convert_to_mode (fmode
, from
, 0);
4467 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4468 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4472 if (imode
!= GET_MODE (to
))
4473 target
= gen_reg_rtx (imode
);
4475 emit_unop_insn (icode
, target
, from
,
4476 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4478 convert_move (to
, target
, unsignedp
);
4483 /* For an unsigned conversion, there is one more way to do it.
4484 If we have a signed conversion, we generate code that compares
4485 the real value to the largest representable positive number. If if
4486 is smaller, the conversion is done normally. Otherwise, subtract
4487 one plus the highest signed number, convert, and add it back.
4489 We only need to check all real modes, since we know we didn't find
4490 anything with a wider integer mode.
4492 This code used to extend FP value into mode wider than the destination.
4493 This is not needed. Consider, for instance conversion from SFmode
4496 The hot path trought the code is dealing with inputs smaller than 2^63
4497 and doing just the conversion, so there is no bits to lose.
4499 In the other path we know the value is positive in the range 2^63..2^64-1
4500 inclusive. (as for other imput overflow happens and result is undefined)
4501 So we know that the most important bit set in mantissa corresponds to
4502 2^63. The subtraction of 2^63 should not generate any rounding as it
4503 simply clears out that bit. The rest is trivial. */
4505 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4506 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4507 fmode
= GET_MODE_WIDER_MODE (fmode
))
4508 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4512 REAL_VALUE_TYPE offset
;
4513 rtx limit
, lab1
, lab2
, insn
;
4515 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4516 real_2expN (&offset
, bitsize
- 1);
4517 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4518 lab1
= gen_label_rtx ();
4519 lab2
= gen_label_rtx ();
4522 from
= force_not_mem (from
);
4524 if (fmode
!= GET_MODE (from
))
4525 from
= convert_to_mode (fmode
, from
, 0);
4527 /* See if we need to do the subtraction. */
4528 do_pending_stack_adjust ();
4529 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4532 /* If not, do the signed "fix" and branch around fixup code. */
4533 expand_fix (to
, from
, 0);
4534 emit_jump_insn (gen_jump (lab2
));
4537 /* Otherwise, subtract 2**(N-1), convert to signed number,
4538 then add 2**(N-1). Do the addition using XOR since this
4539 will often generate better code. */
4541 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4542 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4543 expand_fix (to
, target
, 0);
4544 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4546 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4548 to
, 1, OPTAB_LIB_WIDEN
);
4551 emit_move_insn (to
, target
);
4555 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4556 != CODE_FOR_nothing
)
4558 /* Make a place for a REG_NOTE and add it. */
4559 insn
= emit_move_insn (to
, to
);
4560 set_unique_reg_note (insn
,
4562 gen_rtx_fmt_e (UNSIGNED_FIX
,
4570 /* We can't do it with an insn, so use a library call. But first ensure
4571 that the mode of TO is at least as wide as SImode, since those are the
4572 only library calls we know about. */
4574 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4576 target
= gen_reg_rtx (SImode
);
4578 expand_fix (target
, from
, unsignedp
);
4586 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4587 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4592 from
= force_not_mem (from
);
4596 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4597 GET_MODE (to
), 1, from
,
4599 insns
= get_insns ();
4602 emit_libcall_block (insns
, target
, value
,
4603 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4604 GET_MODE (to
), from
));
4609 if (GET_MODE (to
) == GET_MODE (target
))
4610 emit_move_insn (to
, target
);
4612 convert_move (to
, target
, 0);
4616 /* Report whether we have an instruction to perform the operation
4617 specified by CODE on operands of mode MODE. */
4619 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4621 return (code_to_optab
[(int) code
] != 0
4622 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4623 != CODE_FOR_nothing
));
4626 /* Create a blank optab. */
4631 optab op
= ggc_alloc (sizeof (struct optab
));
4632 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4634 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4635 op
->handlers
[i
].libfunc
= 0;
4641 static convert_optab
4642 new_convert_optab (void)
4645 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4646 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4647 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4649 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4650 op
->handlers
[i
][j
].libfunc
= 0;
4655 /* Same, but fill in its code as CODE, and write it into the
4656 code_to_optab table. */
4658 init_optab (enum rtx_code code
)
4660 optab op
= new_optab ();
4662 code_to_optab
[(int) code
] = op
;
4666 /* Same, but fill in its code as CODE, and do _not_ write it into
4667 the code_to_optab table. */
4669 init_optabv (enum rtx_code code
)
4671 optab op
= new_optab ();
4676 /* Conversion optabs never go in the code_to_optab table. */
4677 static inline convert_optab
4678 init_convert_optab (enum rtx_code code
)
4680 convert_optab op
= new_convert_optab ();
4685 /* Initialize the libfunc fields of an entire group of entries in some
4686 optab. Each entry is set equal to a string consisting of a leading
4687 pair of underscores followed by a generic operation name followed by
4688 a mode name (downshifted to lowercase) followed by a single character
4689 representing the number of operands for the given operation (which is
4690 usually one of the characters '2', '3', or '4').
4692 OPTABLE is the table in which libfunc fields are to be initialized.
4693 FIRST_MODE is the first machine mode index in the given optab to
4695 LAST_MODE is the last machine mode index in the given optab to
4697 OPNAME is the generic (string) name of the operation.
4698 SUFFIX is the character which specifies the number of operands for
4699 the given generic operation.
4703 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4704 const char *opname
, int suffix
)
4707 unsigned opname_len
= strlen (opname
);
4709 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4710 mode
= (enum machine_mode
) ((int) mode
+ 1))
4712 const char *mname
= GET_MODE_NAME (mode
);
4713 unsigned mname_len
= strlen (mname
);
4714 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4721 for (q
= opname
; *q
; )
4723 for (q
= mname
; *q
; q
++)
4724 *p
++ = TOLOWER (*q
);
4728 optable
->handlers
[(int) mode
].libfunc
4729 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4733 /* Initialize the libfunc fields of an entire group of entries in some
4734 optab which correspond to all integer mode operations. The parameters
4735 have the same meaning as similarly named ones for the `init_libfuncs'
4736 routine. (See above). */
4739 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4741 int maxsize
= 2*BITS_PER_WORD
;
4742 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4743 maxsize
= LONG_LONG_TYPE_SIZE
;
4744 init_libfuncs (optable
, word_mode
,
4745 mode_for_size (maxsize
, MODE_INT
, 0),
4749 /* Initialize the libfunc fields of an entire group of entries in some
4750 optab which correspond to all real mode operations. The parameters
4751 have the same meaning as similarly named ones for the `init_libfuncs'
4752 routine. (See above). */
4755 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4757 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4760 /* Initialize the libfunc fields of an entire group of entries of an
4761 inter-mode-class conversion optab. The string formation rules are
4762 similar to the ones for init_libfuncs, above, but instead of having
4763 a mode name and an operand count these functions have two mode names
4764 and no operand count. */
4766 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4767 enum mode_class from_class
,
4768 enum mode_class to_class
)
4770 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4771 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4772 size_t opname_len
= strlen (opname
);
4773 size_t max_mname_len
= 0;
4775 enum machine_mode fmode
, tmode
;
4776 const char *fname
, *tname
;
4778 char *libfunc_name
, *suffix
;
4781 for (fmode
= first_from_mode
;
4783 fmode
= GET_MODE_WIDER_MODE (fmode
))
4784 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4786 for (tmode
= first_to_mode
;
4788 tmode
= GET_MODE_WIDER_MODE (tmode
))
4789 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4791 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4792 libfunc_name
[0] = '_';
4793 libfunc_name
[1] = '_';
4794 memcpy (&libfunc_name
[2], opname
, opname_len
);
4795 suffix
= libfunc_name
+ opname_len
+ 2;
4797 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4798 fmode
= GET_MODE_WIDER_MODE (fmode
))
4799 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4800 tmode
= GET_MODE_WIDER_MODE (tmode
))
4802 fname
= GET_MODE_NAME (fmode
);
4803 tname
= GET_MODE_NAME (tmode
);
4806 for (q
= fname
; *q
; p
++, q
++)
4808 for (q
= tname
; *q
; p
++, q
++)
4813 tab
->handlers
[tmode
][fmode
].libfunc
4814 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4819 /* Initialize the libfunc fields of an entire group of entries of an
4820 intra-mode-class conversion optab. The string formation rules are
4821 similar to the ones for init_libfunc, above. WIDENING says whether
4822 the optab goes from narrow to wide modes or vice versa. These functions
4823 have two mode names _and_ an operand count. */
4825 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4826 enum mode_class
class, bool widening
)
4828 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4829 size_t opname_len
= strlen (opname
);
4830 size_t max_mname_len
= 0;
4832 enum machine_mode nmode
, wmode
;
4833 const char *nname
, *wname
;
4835 char *libfunc_name
, *suffix
;
4838 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4839 nmode
= GET_MODE_WIDER_MODE (nmode
))
4840 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4842 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4843 libfunc_name
[0] = '_';
4844 libfunc_name
[1] = '_';
4845 memcpy (&libfunc_name
[2], opname
, opname_len
);
4846 suffix
= libfunc_name
+ opname_len
+ 2;
4848 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4849 nmode
= GET_MODE_WIDER_MODE (nmode
))
4850 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4851 wmode
= GET_MODE_WIDER_MODE (wmode
))
4853 nname
= GET_MODE_NAME (nmode
);
4854 wname
= GET_MODE_NAME (wmode
);
4857 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4859 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4865 tab
->handlers
[widening
? wmode
: nmode
]
4866 [widening
? nmode
: wmode
].libfunc
4867 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4874 init_one_libfunc (const char *name
)
4878 /* Create a FUNCTION_DECL that can be passed to
4879 targetm.encode_section_info. */
4880 /* ??? We don't have any type information except for this is
4881 a function. Pretend this is "int foo()". */
4882 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4883 build_function_type (integer_type_node
, NULL_TREE
));
4884 DECL_ARTIFICIAL (decl
) = 1;
4885 DECL_EXTERNAL (decl
) = 1;
4886 TREE_PUBLIC (decl
) = 1;
4888 symbol
= XEXP (DECL_RTL (decl
), 0);
4890 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4891 are the flags assigned by targetm.encode_section_info. */
4892 SYMBOL_REF_DECL (symbol
) = 0;
4897 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4898 MODE to NAME, which should be either 0 or a string constant. */
4900 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4903 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4905 optable
->handlers
[mode
].libfunc
= 0;
4908 /* Call this to reset the function entry for one conversion optab
4909 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4910 either 0 or a string constant. */
4912 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4913 enum machine_mode fmode
, const char *name
)
4916 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4918 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4921 /* Call this once to initialize the contents of the optabs
4922 appropriately for the current target machine. */
4929 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4931 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4932 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4934 #ifdef HAVE_conditional_move
4935 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4936 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4939 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4941 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4942 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4945 add_optab
= init_optab (PLUS
);
4946 addv_optab
= init_optabv (PLUS
);
4947 sub_optab
= init_optab (MINUS
);
4948 subv_optab
= init_optabv (MINUS
);
4949 smul_optab
= init_optab (MULT
);
4950 smulv_optab
= init_optabv (MULT
);
4951 smul_highpart_optab
= init_optab (UNKNOWN
);
4952 umul_highpart_optab
= init_optab (UNKNOWN
);
4953 smul_widen_optab
= init_optab (UNKNOWN
);
4954 umul_widen_optab
= init_optab (UNKNOWN
);
4955 sdiv_optab
= init_optab (DIV
);
4956 sdivv_optab
= init_optabv (DIV
);
4957 sdivmod_optab
= init_optab (UNKNOWN
);
4958 udiv_optab
= init_optab (UDIV
);
4959 udivmod_optab
= init_optab (UNKNOWN
);
4960 smod_optab
= init_optab (MOD
);
4961 umod_optab
= init_optab (UMOD
);
4962 fmod_optab
= init_optab (UNKNOWN
);
4963 drem_optab
= init_optab (UNKNOWN
);
4964 ftrunc_optab
= init_optab (UNKNOWN
);
4965 and_optab
= init_optab (AND
);
4966 ior_optab
= init_optab (IOR
);
4967 xor_optab
= init_optab (XOR
);
4968 ashl_optab
= init_optab (ASHIFT
);
4969 ashr_optab
= init_optab (ASHIFTRT
);
4970 lshr_optab
= init_optab (LSHIFTRT
);
4971 rotl_optab
= init_optab (ROTATE
);
4972 rotr_optab
= init_optab (ROTATERT
);
4973 smin_optab
= init_optab (SMIN
);
4974 smax_optab
= init_optab (SMAX
);
4975 umin_optab
= init_optab (UMIN
);
4976 umax_optab
= init_optab (UMAX
);
4977 pow_optab
= init_optab (UNKNOWN
);
4978 atan2_optab
= init_optab (UNKNOWN
);
4980 /* These three have codes assigned exclusively for the sake of
4982 mov_optab
= init_optab (SET
);
4983 movstrict_optab
= init_optab (STRICT_LOW_PART
);
4984 cmp_optab
= init_optab (COMPARE
);
4986 ucmp_optab
= init_optab (UNKNOWN
);
4987 tst_optab
= init_optab (UNKNOWN
);
4989 eq_optab
= init_optab (EQ
);
4990 ne_optab
= init_optab (NE
);
4991 gt_optab
= init_optab (GT
);
4992 ge_optab
= init_optab (GE
);
4993 lt_optab
= init_optab (LT
);
4994 le_optab
= init_optab (LE
);
4995 unord_optab
= init_optab (UNORDERED
);
4997 neg_optab
= init_optab (NEG
);
4998 negv_optab
= init_optabv (NEG
);
4999 abs_optab
= init_optab (ABS
);
5000 absv_optab
= init_optabv (ABS
);
5001 addcc_optab
= init_optab (UNKNOWN
);
5002 one_cmpl_optab
= init_optab (NOT
);
5003 ffs_optab
= init_optab (FFS
);
5004 clz_optab
= init_optab (CLZ
);
5005 ctz_optab
= init_optab (CTZ
);
5006 popcount_optab
= init_optab (POPCOUNT
);
5007 parity_optab
= init_optab (PARITY
);
5008 sqrt_optab
= init_optab (SQRT
);
5009 floor_optab
= init_optab (UNKNOWN
);
5010 ceil_optab
= init_optab (UNKNOWN
);
5011 round_optab
= init_optab (UNKNOWN
);
5012 btrunc_optab
= init_optab (UNKNOWN
);
5013 nearbyint_optab
= init_optab (UNKNOWN
);
5014 rint_optab
= init_optab (UNKNOWN
);
5015 sincos_optab
= init_optab (UNKNOWN
);
5016 sin_optab
= init_optab (UNKNOWN
);
5017 asin_optab
= init_optab (UNKNOWN
);
5018 cos_optab
= init_optab (UNKNOWN
);
5019 acos_optab
= init_optab (UNKNOWN
);
5020 exp_optab
= init_optab (UNKNOWN
);
5021 exp10_optab
= init_optab (UNKNOWN
);
5022 exp2_optab
= init_optab (UNKNOWN
);
5023 expm1_optab
= init_optab (UNKNOWN
);
5024 ldexp_optab
= init_optab (UNKNOWN
);
5025 logb_optab
= init_optab (UNKNOWN
);
5026 ilogb_optab
= init_optab (UNKNOWN
);
5027 log_optab
= init_optab (UNKNOWN
);
5028 log10_optab
= init_optab (UNKNOWN
);
5029 log2_optab
= init_optab (UNKNOWN
);
5030 log1p_optab
= init_optab (UNKNOWN
);
5031 tan_optab
= init_optab (UNKNOWN
);
5032 atan_optab
= init_optab (UNKNOWN
);
5033 copysign_optab
= init_optab (UNKNOWN
);
5035 strlen_optab
= init_optab (UNKNOWN
);
5036 cbranch_optab
= init_optab (UNKNOWN
);
5037 cmov_optab
= init_optab (UNKNOWN
);
5038 cstore_optab
= init_optab (UNKNOWN
);
5039 push_optab
= init_optab (UNKNOWN
);
5041 vec_extract_optab
= init_optab (UNKNOWN
);
5042 vec_set_optab
= init_optab (UNKNOWN
);
5043 vec_init_optab
= init_optab (UNKNOWN
);
5044 vec_realign_load_optab
= init_optab (UNKNOWN
);
5045 movmisalign_optab
= init_optab (UNKNOWN
);
5047 powi_optab
= init_optab (UNKNOWN
);
5050 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5051 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5052 trunc_optab
= init_convert_optab (TRUNCATE
);
5053 sfix_optab
= init_convert_optab (FIX
);
5054 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5055 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5056 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5057 sfloat_optab
= init_convert_optab (FLOAT
);
5058 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5060 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5062 movmem_optab
[i
] = CODE_FOR_nothing
;
5063 clrmem_optab
[i
] = CODE_FOR_nothing
;
5064 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5065 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5067 #ifdef HAVE_SECONDARY_RELOADS
5068 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5072 /* Fill in the optabs with the insns we support. */
5075 /* Initialize the optabs with the names of the library functions. */
5076 init_integral_libfuncs (add_optab
, "add", '3');
5077 init_floating_libfuncs (add_optab
, "add", '3');
5078 init_integral_libfuncs (addv_optab
, "addv", '3');
5079 init_floating_libfuncs (addv_optab
, "add", '3');
5080 init_integral_libfuncs (sub_optab
, "sub", '3');
5081 init_floating_libfuncs (sub_optab
, "sub", '3');
5082 init_integral_libfuncs (subv_optab
, "subv", '3');
5083 init_floating_libfuncs (subv_optab
, "sub", '3');
5084 init_integral_libfuncs (smul_optab
, "mul", '3');
5085 init_floating_libfuncs (smul_optab
, "mul", '3');
5086 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5087 init_floating_libfuncs (smulv_optab
, "mul", '3');
5088 init_integral_libfuncs (sdiv_optab
, "div", '3');
5089 init_floating_libfuncs (sdiv_optab
, "div", '3');
5090 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5091 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5092 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5093 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5094 init_integral_libfuncs (smod_optab
, "mod", '3');
5095 init_integral_libfuncs (umod_optab
, "umod", '3');
5096 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5097 init_integral_libfuncs (and_optab
, "and", '3');
5098 init_integral_libfuncs (ior_optab
, "ior", '3');
5099 init_integral_libfuncs (xor_optab
, "xor", '3');
5100 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5101 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5102 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5103 init_integral_libfuncs (smin_optab
, "min", '3');
5104 init_floating_libfuncs (smin_optab
, "min", '3');
5105 init_integral_libfuncs (smax_optab
, "max", '3');
5106 init_floating_libfuncs (smax_optab
, "max", '3');
5107 init_integral_libfuncs (umin_optab
, "umin", '3');
5108 init_integral_libfuncs (umax_optab
, "umax", '3');
5109 init_integral_libfuncs (neg_optab
, "neg", '2');
5110 init_floating_libfuncs (neg_optab
, "neg", '2');
5111 init_integral_libfuncs (negv_optab
, "negv", '2');
5112 init_floating_libfuncs (negv_optab
, "neg", '2');
5113 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5114 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5115 init_integral_libfuncs (clz_optab
, "clz", '2');
5116 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5117 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5118 init_integral_libfuncs (parity_optab
, "parity", '2');
5120 /* Comparison libcalls for integers MUST come in pairs,
5122 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5123 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5124 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5126 /* EQ etc are floating point only. */
5127 init_floating_libfuncs (eq_optab
, "eq", '2');
5128 init_floating_libfuncs (ne_optab
, "ne", '2');
5129 init_floating_libfuncs (gt_optab
, "gt", '2');
5130 init_floating_libfuncs (ge_optab
, "ge", '2');
5131 init_floating_libfuncs (lt_optab
, "lt", '2');
5132 init_floating_libfuncs (le_optab
, "le", '2');
5133 init_floating_libfuncs (unord_optab
, "unord", '2');
5135 init_floating_libfuncs (powi_optab
, "powi", '2');
5138 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5139 MODE_INT
, MODE_FLOAT
);
5140 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5141 MODE_FLOAT
, MODE_INT
);
5142 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5143 MODE_FLOAT
, MODE_INT
);
5145 /* sext_optab is also used for FLOAT_EXTEND. */
5146 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5147 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5149 /* Use cabs for double complex abs, since systems generally have cabs.
5150 Don't define any libcall for float complex, so that cabs will be used. */
5151 if (complex_double_type_node
)
5152 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5153 = init_one_libfunc ("cabs");
5155 /* The ffs function operates on `int'. */
5156 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5157 = init_one_libfunc ("ffs");
5159 abort_libfunc
= init_one_libfunc ("abort");
5160 memcpy_libfunc
= init_one_libfunc ("memcpy");
5161 memmove_libfunc
= init_one_libfunc ("memmove");
5162 memcmp_libfunc
= init_one_libfunc ("memcmp");
5163 memset_libfunc
= init_one_libfunc ("memset");
5164 setbits_libfunc
= init_one_libfunc ("__setbits");
5166 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5167 ? "_Unwind_SjLj_Resume"
5168 : "_Unwind_Resume");
5169 #ifndef DONT_USE_BUILTIN_SETJMP
5170 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5171 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5173 setjmp_libfunc
= init_one_libfunc ("setjmp");
5174 longjmp_libfunc
= init_one_libfunc ("longjmp");
5176 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5177 unwind_sjlj_unregister_libfunc
5178 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5180 /* For function entry/exit instrumentation. */
5181 profile_function_entry_libfunc
5182 = init_one_libfunc ("__cyg_profile_func_enter");
5183 profile_function_exit_libfunc
5184 = init_one_libfunc ("__cyg_profile_func_exit");
5186 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5188 if (HAVE_conditional_trap
)
5189 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5191 /* Allow the target to add more libcalls or rename some, etc. */
5192 targetm
.init_libfuncs ();
5197 /* Print information about the current contents of the optabs on
5201 debug_optab_libfuncs (void)
5207 /* Dump the arithmetic optabs. */
5208 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5209 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5212 struct optab_handlers
*h
;
5215 h
= &o
->handlers
[j
];
5218 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5220 fprintf (stderr
, "%s\t%s:\t%s\n",
5221 GET_RTX_NAME (o
->code
),
5223 XSTR (h
->libfunc
, 0));
5227 /* Dump the conversion optabs. */
5228 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
5229 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5230 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5233 struct optab_handlers
*h
;
5235 o
= &convert_optab_table
[i
];
5236 h
= &o
->handlers
[j
][k
];
5239 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5241 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5242 GET_RTX_NAME (o
->code
),
5245 XSTR (h
->libfunc
, 0));
5253 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5254 CODE. Return 0 on failure. */
5257 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5258 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5260 enum machine_mode mode
= GET_MODE (op1
);
5261 enum insn_code icode
;
5264 if (!HAVE_conditional_trap
)
5267 if (mode
== VOIDmode
)
5270 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5271 if (icode
== CODE_FOR_nothing
)
5275 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5276 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5282 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5284 PUT_CODE (trap_rtx
, code
);
5285 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5289 insn
= get_insns ();
5296 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5297 or unsigned operation code. */
5299 static enum rtx_code
5300 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5312 code
= unsignedp
? LTU
: LT
;
5315 code
= unsignedp
? LEU
: LE
;
5318 code
= unsignedp
? GTU
: GT
;
5321 code
= unsignedp
? GEU
: GE
;
5324 case UNORDERED_EXPR
:
5355 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5356 unsigned operators. Do not generate compare instruction. */
5359 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5361 enum rtx_code rcode
;
5363 rtx rtx_op0
, rtx_op1
;
5365 if (!COMPARISON_CLASS_P (cond
))
5367 /* This is unlikely. While generating VEC_COND_EXPR,
5368 auto vectorizer ensures that condition is a relational
5374 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5375 t_op0
= TREE_OPERAND (cond
, 0);
5376 t_op1
= TREE_OPERAND (cond
, 1);
5379 /* Expand operands. */
5380 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5381 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5383 if (!(*insn_data
[icode
].operand
[4].predicate
) (rtx_op0
, GET_MODE (rtx_op0
))
5384 && GET_MODE (rtx_op0
) != VOIDmode
)
5385 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5387 if (!(*insn_data
[icode
].operand
[5].predicate
) (rtx_op1
, GET_MODE (rtx_op1
))
5388 && GET_MODE (rtx_op1
) != VOIDmode
)
5389 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5391 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5394 /* Return insn code for VEC_COND_EXPR EXPR. */
5396 static inline enum insn_code
5397 get_vcond_icode (tree expr
, enum machine_mode mode
)
5399 enum insn_code icode
= CODE_FOR_nothing
;
5401 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5402 icode
= vcondu_gen_code
[mode
];
5404 icode
= vcond_gen_code
[mode
];
5408 /* Return TRUE iff, appropriate vector insns are available
5409 for vector cond expr expr in VMODE mode. */
5412 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5414 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5419 /* Generate insns for VEC_COND_EXPR. */
5422 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5424 enum insn_code icode
;
5425 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5426 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5427 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5429 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5430 if (icode
== CODE_FOR_nothing
)
5434 target
= gen_reg_rtx (mode
);
5436 /* Get comparison rtx. First expand both cond expr operands. */
5437 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5439 cc_op0
= XEXP (comparison
, 0);
5440 cc_op1
= XEXP (comparison
, 1);
5441 /* Expand both operands and force them in reg, if required. */
5442 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5443 NULL_RTX
, VOIDmode
, 1);
5444 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5445 && mode
!= VOIDmode
)
5446 rtx_op1
= force_reg (mode
, rtx_op1
);
5448 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5449 NULL_RTX
, VOIDmode
, 1);
5450 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5451 && mode
!= VOIDmode
)
5452 rtx_op2
= force_reg (mode
, rtx_op2
);
5454 /* Emit instruction! */
5455 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5456 comparison
, cc_op0
, cc_op1
));
5460 #include "gt-optabs.h"