1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this chunk is a damaged extraction -- each source line is
   split across several physical lines and the embedded original line
   numbers have gaps (e.g. 57 -> 59, 84 -> 87), so declarations here are
   fragmentary.  Kept byte-identical; restore from the original optabs.c
   before compiling.  */
/* Global tables: one optab per operation (optab_table), the library
   call used when no insn pattern exists (libfunc_table), and the
   mode-to-mode conversion optabs (convert_optab_table).  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* NOTE(review): the comment for vcond_gen_code is truncated here (original
   lines 88-89 are missing from this extraction).  */
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
/* NOTE(review): forward declarations for this file's static helpers.
   The extraction dropped some prototype lines (original numbering jumps
   100 -> 101 and 105 -> 106), so at least two declarations are cut
   short mid-parameter-list; text kept byte-identical.  */
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
/* Fallback stubs for targets without a conditional trap pattern: the
   macro aborts if ever expanded at runtime.  */
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
/* NOTE(review): extraction gaps -- embedded numbering jumps 144 -> 146
   -> 151 etc., so this function's return-type line, braces, early
   returns and several statements are missing from this chunk.  Kept
   byte-identical; do not attempt to compile as-is.  */
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
151 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only codes whose RTX class takes one or two operands are notated.  */
154 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
155 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
156 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
157 && GET_RTX_CLASS (code
) != RTX_COMPARE
158 && GET_RTX_CLASS (code
) != RTX_UNARY
)
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Walk to the last insn of the sequence.  */
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
/* Build the (CODE OP0 [OP1]) note expression; unary codes use the
   single-operand rtx constructor.  */
194 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
/* NOTE(review): extraction gaps (numbering jumps 212 -> 216, 217 -> 220,
   222 -> 224, 226 -> 228, 231 -> 233, 238 -> ...) -- the function's
   return type, braces, part of the "must extend" condition and the
   final return are missing from this chunk.  Kept byte-identical.  */
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
/* NOTE(review): extraction gaps -- the switch statement scaffolding
   (the `switch (code)` line and every `case` label except
   REALIGN_LOAD_EXPR) is missing from this chunk; only the per-case
   return statements survive.  Kept byte-identical.  */
242 /* Return the optab used for computing the operation given by
243 the tree code, CODE. This function is not always usable (for
244 example, it cannot give complete results for multiplication
245 or division) but probably ought to be relied on more widely
246 throughout the expander. */
248 optab_for_tree_code (enum tree_code code
, tree type
)
260 return one_cmpl_optab
;
/* Signed/unsigned selection below keys off TYPE_UNSIGNED of TYPE.  */
269 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
277 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
283 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
292 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
295 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
297 case REALIGN_LOAD_EXPR
:
298 return vec_realign_load_optab
;
/* Trapping variants are selected only for signed integral types when
   -ftrapv is in effect.  */
304 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
308 return trapv
? addv_optab
: add_optab
;
311 return trapv
? subv_optab
: sub_optab
;
314 return trapv
? smulv_optab
: smul_optab
;
317 return trapv
? negv_optab
: neg_optab
;
320 return trapv
? absv_optab
: abs_optab
;
/* NOTE(review): extraction gaps (e.g. 346 -> 349, 351 -> 355, 356 -> 360,
   368 -> 373, 402 -> 409) -- return type, braces, the failure path for
   CODE_FOR_nothing, the trailing arguments of each convert_modes call,
   and the emit/return tail are missing from this chunk.  Kept
   byte-identical.  */
328 /* Generate code to perform an operation specified by TERNARY_OPTAB
329 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
331 UNSIGNEDP is for the case where we have to widen the operands
332 to perform the operation. It says to use zero-extension.
334 If TARGET is nonzero, the value
335 is generated there, if it is convenient to do so.
336 In all cases an rtx is returned for the locus of the value;
337 this may or may not be TARGET. */
340 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
341 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
/* Look up the insn pattern for this optab/mode and the modes its
   three input operands want.  */
343 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
344 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
345 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
346 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
349 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
351 if (ternary_optab
->handlers
[(int) mode
].insn_code
== CODE_FOR_nothing
)
/* Get a fresh pseudo when TARGET is absent or rejected by the output
   predicate.  */
355 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
356 temp
= gen_reg_rtx (mode
);
360 /* In case the insn wants input operands in modes different from
361 those of the actual operands, convert the operands. It would
362 seem that we don't need to convert CONST_INTs, but we do, so
363 that they're properly zero-extended, sign-extended or truncated
366 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
367 xop0
= convert_modes (mode0
,
368 GET_MODE (op0
) != VOIDmode
373 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
374 xop1
= convert_modes (mode1
,
375 GET_MODE (op1
) != VOIDmode
380 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
381 xop2
= convert_modes (mode2
,
382 GET_MODE (op2
) != VOIDmode
387 /* Now, if insn's predicates don't allow our operands, put them into
390 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
391 && mode0
!= VOIDmode
)
392 xop0
= copy_to_mode_reg (mode0
, xop0
);
394 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
395 && mode1
!= VOIDmode
)
396 xop1
= copy_to_mode_reg (mode1
, xop1
);
398 if (! (*insn_data
[icode
].operand
[3].predicate
) (xop2
, mode2
)
399 && mode2
!= VOIDmode
)
400 xop2
= copy_to_mode_reg (mode2
, xop2
);
/* Generate the insn pattern; the emit/return of PAT is missing from
   this extraction.  */
402 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
/* NOTE(review): extraction gaps (411 -> 414, 416 -> 418) -- the `rtx`
   return-type line and the function braces are missing from this
   chunk.  The visible logic is complete: constant-fold when both
   operands are CONSTANT_P, otherwise delegate to expand_binop.  Kept
   byte-identical.  */
409 /* Like expand_binop, but return a constant rtx if the result can be
410 calculated at compile time. The arguments and return value are
411 otherwise the same as for expand_binop. */
414 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
415 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
416 enum optab_methods methods
)
418 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
419 return simplify_gen_binary (binoptab
->code
, mode
, op0
, op1
);
421 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
/* NOTE(review): extraction gaps (425 -> 428, 433 -> 437, and the tail
   after 437) -- the `bool` return-type line, braces, the failure test
   on X, the `x != target` guard around emit_move_insn, and the final
   return are missing from this chunk.  Kept byte-identical.  */
424 /* Like simplify_expand_binop, but always put the result in TARGET.
425 Return true if the expansion succeeded. */
428 force_expand_binop (enum machine_mode mode
, optab binoptab
,
429 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
430 enum optab_methods methods
)
432 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
433 target
, unsignedp
, methods
);
437 emit_move_insn (target
, x
);
/* NOTE(review): extraction gaps (445 -> 448, 454 -> 457, 466 -> 472) --
   return type, braces, the `return false` failure paths and the final
   `return true` are missing from this chunk.  Kept byte-identical.  */
441 /* This subroutine of expand_doubleword_shift handles the cases in which
442 the effective shift value is >= BITS_PER_WORD. The arguments and return
443 value are the same as for the parent routine, except that SUPERWORD_OP1
444 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
445 INTO_TARGET may be null if the caller has decided to calculate it. */
448 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
449 rtx outof_target
, rtx into_target
,
450 int unsignedp
, enum optab_methods methods
)
452 if (into_target
!= 0)
453 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
454 into_target
, unsignedp
, methods
))
457 if (outof_target
!= 0)
459 /* For a signed right shift, we must fill OUTOF_TARGET with copies
460 of the sign bit, otherwise we must fill it with zeros. */
461 if (binoptab
!= ashr_optab
)
462 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
/* Arithmetic right shift by BITS_PER_WORD - 1 replicates the sign bit
   across OUTOF_TARGET.  */
464 if (!force_expand_binop (word_mode
, binoptab
,
465 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
466 outof_target
, unsignedp
, methods
))
/* NOTE(review): extraction gaps (e.g. 496 -> 501, 511 -> 516, 517 -> 521,
   531 -> 535, 543 -> 550) -- return type, braces, the trailing
   arguments of several simplify_expand_binop calls, the failure
   returns and the final `return true` are missing from this chunk.
   Kept byte-identical.  */
472 /* This subroutine of expand_doubleword_shift handles the cases in which
473 the effective shift value is < BITS_PER_WORD. The arguments and return
474 value are the same as for the parent routine. */
477 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
478 rtx outof_input
, rtx into_input
, rtx op1
,
479 rtx outof_target
, rtx into_target
,
480 int unsignedp
, enum optab_methods methods
,
481 unsigned HOST_WIDE_INT shift_mask
)
483 optab reverse_unsigned_shift
, unsigned_shift
;
/* The carry bits always move with a logical shift in the opposite
   direction to BINOPTAB; the into-half uses a logical shift in the
   same direction.  */
486 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
487 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
489 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
490 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
491 the opposite direction to BINOPTAB. */
492 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
494 carries
= outof_input
;
495 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
496 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
501 /* We must avoid shifting by BITS_PER_WORD bits since that is either
502 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
503 has unknown behavior. Do a single shift first, then shift by the
504 remainder. It's OK to use ~OP1 as the remainder if shift counts
505 are truncated to the mode size. */
506 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
507 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
508 if (shift_mask
== BITS_PER_WORD
- 1)
/* ~OP1 computed as OP1 XOR -1.  */
510 tmp
= immed_double_const (-1, -1, op1_mode
);
511 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
516 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
517 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
521 if (tmp
== 0 || carries
== 0)
523 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
524 carries
, tmp
, 0, unsignedp
, methods
);
528 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
529 so the result can go directly into INTO_TARGET if convenient. */
530 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
531 into_target
, unsignedp
, methods
);
535 /* Now OR in the bits carried over from OUTOF_INPUT. */
536 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
537 into_target
, unsignedp
, methods
))
540 /* Use a standard word_mode shift for the out-of half. */
541 if (outof_target
!= 0)
542 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
543 outof_target
, unsignedp
, methods
))
/* NOTE(review): extraction gaps (e.g. 577 -> 582, 584 -> 589, 604 -> 612)
   -- return type, braces, the else-branch structure, the final
   arguments of two calls, the failure returns and the closing
   `return true` / `#endif` pairing are missing from this chunk.  Kept
   byte-identical.  */
550 #ifdef HAVE_conditional_move
551 /* Try implementing expand_doubleword_shift using conditional moves.
552 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
553 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
554 are the shift counts to use in the former and latter case. All other
555 arguments are the same as the parent routine. */
558 expand_doubleword_shift_condmove (enum machine_mode op1_mode
, optab binoptab
,
559 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
560 rtx outof_input
, rtx into_input
,
561 rtx subword_op1
, rtx superword_op1
,
562 rtx outof_target
, rtx into_target
,
563 int unsignedp
, enum optab_methods methods
,
564 unsigned HOST_WIDE_INT shift_mask
)
566 rtx outof_superword
, into_superword
;
568 /* Put the superword version of the output into OUTOF_SUPERWORD and
570 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
571 if (outof_target
!= 0 && subword_op1
== superword_op1
)
573 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
574 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
575 into_superword
= outof_target
;
576 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
577 outof_superword
, 0, unsignedp
, methods
))
582 into_superword
= gen_reg_rtx (word_mode
);
583 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
584 outof_superword
, into_superword
,
589 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
590 if (!expand_subword_shift (op1_mode
, binoptab
,
591 outof_input
, into_input
, subword_op1
,
592 outof_target
, into_target
,
593 unsignedp
, methods
, shift_mask
))
596 /* Select between them. Do the INTO half first because INTO_SUPERWORD
597 might be the current value of OUTOF_TARGET. */
598 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
599 into_target
, into_superword
, word_mode
, false))
602 if (outof_target
!= 0)
603 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
604 outof_target
, outof_superword
,
/* NOTE(review): extraction gaps (e.g. 646 -> 648, 665 -> 667, 684 -> 686,
   697 -> 702, 715 -> 718, 741 -> 745, 755 -> 759) -- return type,
   braces, the recursive call's OUTOF/INTO target arguments, the
   cmp_code assignments, several failure returns and the final
   `return true` are missing from this chunk.  Kept byte-identical.  */
612 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
613 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
614 input operand; the shift moves bits in the direction OUTOF_INPUT->
615 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
616 of the target. OP1 is the shift count and OP1_MODE is its mode.
617 If OP1 is constant, it will have been truncated as appropriate
618 and is known to be nonzero.
620 If SHIFT_MASK is zero, the result of word shifts is undefined when the
621 shift count is outside the range [0, BITS_PER_WORD). This routine must
622 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
624 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
625 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
626 fill with zeros or sign bits as appropriate.
628 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
629 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
630 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
631 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
634 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
635 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
636 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
637 function wants to calculate it itself.
639 Return true if the shift could be successfully synthesized. */
642 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
643 rtx outof_input
, rtx into_input
, rtx op1
,
644 rtx outof_target
, rtx into_target
,
645 int unsignedp
, enum optab_methods methods
,
646 unsigned HOST_WIDE_INT shift_mask
)
648 rtx superword_op1
, tmp
, cmp1
, cmp2
;
649 rtx subword_label
, done_label
;
650 enum rtx_code cmp_code
;
652 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
653 fill the result with sign or zero bits as appropriate. If so, the value
654 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
655 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
656 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
658 This isn't worthwhile for constant shifts since the optimizers will
659 cope better with in-range shift counts. */
660 if (shift_mask
>= BITS_PER_WORD
662 && !CONSTANT_P (op1
))
664 if (!expand_doubleword_shift (op1_mode
, binoptab
,
665 outof_input
, into_input
, op1
,
667 unsignedp
, methods
, shift_mask
))
669 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
670 outof_target
, unsignedp
, methods
))
675 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
676 is true when the effective shift value is less than BITS_PER_WORD.
677 Set SUPERWORD_OP1 to the shift count that should be used to shift
678 OUTOF_INPUT into INTO_TARGET when the condition is false. */
679 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
680 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
682 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
683 is a subword shift count. */
684 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
686 cmp2
= CONST0_RTX (op1_mode
);
692 /* Set CMP1 to OP1 - BITS_PER_WORD. */
693 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
695 cmp2
= CONST0_RTX (op1_mode
);
697 superword_op1
= cmp1
;
702 /* If we can compute the condition at compile time, pick the
703 appropriate subroutine. */
704 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
705 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
707 if (tmp
== const0_rtx
)
708 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
709 outof_target
, into_target
,
712 return expand_subword_shift (op1_mode
, binoptab
,
713 outof_input
, into_input
, op1
,
714 outof_target
, into_target
,
715 unsignedp
, methods
, shift_mask
);
718 #ifdef HAVE_conditional_move
719 /* Try using conditional moves to generate straight-line code. */
721 rtx start
= get_last_insn ();
722 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
723 cmp_code
, cmp1
, cmp2
,
724 outof_input
, into_input
,
726 outof_target
, into_target
,
727 unsignedp
, methods
, shift_mask
))
/* Condmove expansion failed: roll back any insns it emitted.  */
729 delete_insns_since (start
);
733 /* As a last resort, use branches to select the correct alternative. */
734 subword_label
= gen_label_rtx ();
735 done_label
= gen_label_rtx ();
737 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
738 0, 0, subword_label
);
740 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
741 outof_target
, into_target
,
745 emit_jump_insn (gen_jump (done_label
));
747 emit_label (subword_label
);
749 if (!expand_subword_shift (op1_mode
, binoptab
,
750 outof_input
, into_input
, op1
,
751 outof_target
, into_target
,
752 unsignedp
, methods
, shift_mask
))
755 emit_label (done_label
);
/* NOTE(review): extraction gaps (e.g. 817 -> 819, 837 -> 841, 843 -> 845,
   846 -> 849, 898 -> 900, 908 -> 913, 915 -> 917) -- return type,
   braces, the umulp/else branch structure, the NULL_RTX failure checks
   after each expand_binop, and the final return are missing from this
   chunk.  Kept byte-identical.  */
759 /* Subroutine of expand_binop. Perform a double word multiplication of
760 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
761 as the target's word_mode. This function return NULL_RTX if anything
762 goes wrong, in which case it may have already emitted instructions
763 which need to be deleted.
765 If we want to multiply two two-word values and have normal and widening
766 multiplies of single-word values, we can do this with three smaller
767 multiplications. Note that we do not make a REG_NO_CONFLICT block here
768 because we are not operating on one word at a time.
770 The multiplication proceeds as follows:
771 _______________________
772 [__op0_high_|__op0_low__]
773 _______________________
774 * [__op1_high_|__op1_low__]
775 _______________________________________________
776 _______________________
777 (1) [__op0_low__*__op1_low__]
778 _______________________
779 (2a) [__op0_low__*__op1_high_]
780 _______________________
781 (2b) [__op0_high_*__op1_low__]
782 _______________________
783 (3) [__op0_high_*__op1_high_]
786 This gives a 4-word result. Since we are only interested in the
787 lower 2 words, partial result (3) and the upper words of (2a) and
788 (2b) don't need to be calculated. Hence (2a) and (2b) can be
789 calculated using non-widening multiplication.
791 (1), however, needs to be calculated with an unsigned widening
792 multiplication. If this operation is not directly supported we
793 try using a signed widening multiplication and adjust the result.
794 This adjustment works as follows:
796 If both operands are positive then no adjustment is needed.
798 If the operands have different signs, for example op0_low < 0 and
799 op1_low >= 0, the instruction treats the most significant bit of
800 op0_low as a sign bit instead of a bit with significance
801 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
802 with 2**BITS_PER_WORD - op0_low, and two's complements the
803 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
806 Similarly, if both operands are negative, we need to add
807 (op0_low + op1_low) * 2**BITS_PER_WORD.
809 We use a trick to adjust quickly. We logically shift op0_low right
810 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
811 op0_high (op1_high) before it is used to calculate 2b (2a). If no
812 logical shift exists, we do an arithmetic right shift and subtract
816 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
817 bool umulp
, enum optab_methods methods
)
/* LOW/HIGH pick the subword index of each half according to target
   endianness; WORDM1 is the BITS_PER_WORD-1 shift count used only in
   the signed-widening adjustment path (umulp == false).  */
819 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
820 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
821 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
822 rtx product
, adjust
, product_high
, temp
;
824 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
825 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
826 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
827 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
829 /* If we're using an unsigned multiply to directly compute the product
830 of the low-order words of the operands and perform any required
831 adjustments of the operands, we begin by trying two more multiplications
832 and then computing the appropriate sum.
834 We have checked above that the required addition is provided.
835 Full-word addition will normally always succeed, especially if
836 it is provided at all, so we don't worry about its failure. The
837 multiplication may well fail, however, so we do handle that. */
841 /* ??? This could be done with emit_store_flag where available. */
842 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
843 NULL_RTX
, 1, methods
);
845 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
846 NULL_RTX
, 0, OPTAB_DIRECT
);
849 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
850 NULL_RTX
, 0, methods
);
853 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
854 NULL_RTX
, 0, OPTAB_DIRECT
);
/* Partial product (2b): op0_high * op1_low, non-widening.  */
861 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
862 NULL_RTX
, 0, OPTAB_DIRECT
);
866 /* OP0_HIGH should now be dead. */
870 /* ??? This could be done with emit_store_flag where available. */
871 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
872 NULL_RTX
, 1, methods
);
874 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
875 NULL_RTX
, 0, OPTAB_DIRECT
);
878 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
879 NULL_RTX
, 0, methods
);
882 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
883 NULL_RTX
, 0, OPTAB_DIRECT
);
/* Partial product (2a): op1_high * op0_low, non-widening.  */
890 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
891 NULL_RTX
, 0, OPTAB_DIRECT
);
895 /* OP1_HIGH should now be dead. */
897 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
898 adjust
, 0, OPTAB_DIRECT
);
900 if (target
&& !REG_P (target
))
/* Partial product (1): the widening low*low multiply, unsigned or
   signed depending on which optab the target supports.  */
904 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
905 target
, 1, OPTAB_DIRECT
);
907 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
908 target
, 1, OPTAB_DIRECT
);
/* Fold the adjustment into the high word of the widened product.  */
913 product_high
= operand_subword (product
, high
, 1, mode
);
914 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
915 REG_P (product_high
) ? product_high
: adjust
,
917 emit_move_insn (product_high
, adjust
);
/* NOTE(review): extraction gaps (923 -> 925, 929 -> 933) -- the `rtx`
   return-type line, braces, and the check between lines 929 and 933
   (presumably validating BINOP) are missing from this chunk.  Kept
   byte-identical.  The visible logic maps CODE to an optab via
   code_to_optab and delegates to expand_binop.  */
921 /* Wrapper around expand_binop which takes an rtx code to specify
922 the operation to perform, not an optab pointer. All other
923 arguments are the same. */
925 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
926 rtx op1
, rtx target
, int unsignedp
,
927 enum optab_methods methods
)
929 optab binop
= code_to_optab
[(int) code
];
933 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
936 /* Generate code to perform an operation specified by BINOPTAB
937 on operands OP0 and OP1, with result having machine-mode MODE.
939 UNSIGNEDP is for the case where we have to widen the operands
940 to perform the operation. It says to use zero-extension.
942 If TARGET is nonzero, the value
943 is generated there, if it is convenient to do so.
944 In all cases an rtx is returned for the locus of the value;
945 this may or may not be TARGET. */
948 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
949 rtx target
, int unsignedp
, enum optab_methods methods
)
951 enum optab_methods next_methods
952 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
953 ? OPTAB_WIDEN
: methods
);
954 enum mode_class
class;
955 enum machine_mode wider_mode
;
957 int commutative_op
= 0;
958 int shift_op
= (binoptab
->code
== ASHIFT
959 || binoptab
->code
== ASHIFTRT
960 || binoptab
->code
== LSHIFTRT
961 || binoptab
->code
== ROTATE
962 || binoptab
->code
== ROTATERT
);
963 rtx entry_last
= get_last_insn ();
966 class = GET_MODE_CLASS (mode
);
970 /* Load duplicate non-volatile operands once. */
971 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
973 op0
= force_not_mem (op0
);
978 op0
= force_not_mem (op0
);
979 op1
= force_not_mem (op1
);
983 /* If subtracting an integer constant, convert this into an addition of
984 the negated constant. */
986 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
988 op1
= negate_rtx (mode
, op1
);
989 binoptab
= add_optab
;
992 /* If we are inside an appropriately-short loop and we are optimizing,
993 force expensive constants into a register. */
994 if (CONSTANT_P (op0
) && optimize
995 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
997 if (GET_MODE (op0
) != VOIDmode
)
998 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
999 op0
= force_reg (mode
, op0
);
1002 if (CONSTANT_P (op1
) && optimize
1003 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1005 if (GET_MODE (op1
) != VOIDmode
)
1006 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1007 op1
= force_reg (mode
, op1
);
1010 /* Record where to delete back to if we backtrack. */
1011 last
= get_last_insn ();
1013 /* If operation is commutative,
1014 try to make the first operand a register.
1015 Even better, try to make it the same as the target.
1016 Also try to make the last operand a constant. */
1017 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1018 || binoptab
== smul_widen_optab
1019 || binoptab
== umul_widen_optab
1020 || binoptab
== smul_highpart_optab
1021 || binoptab
== umul_highpart_optab
)
1025 if (((target
== 0 || REG_P (target
))
1029 : rtx_equal_p (op1
, target
))
1030 || GET_CODE (op0
) == CONST_INT
)
1038 /* If we can do it with a three-operand insn, do so. */
1040 if (methods
!= OPTAB_MUST_WIDEN
1041 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1043 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1044 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1045 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1047 rtx xop0
= op0
, xop1
= op1
;
1052 temp
= gen_reg_rtx (mode
);
1054 /* If it is a commutative operator and the modes would match
1055 if we would swap the operands, we can save the conversions. */
1058 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1059 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1063 tmp
= op0
; op0
= op1
; op1
= tmp
;
1064 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1068 /* In case the insn wants input operands in modes different from
1069 those of the actual operands, convert the operands. It would
1070 seem that we don't need to convert CONST_INTs, but we do, so
1071 that they're properly zero-extended, sign-extended or truncated
1074 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1075 xop0
= convert_modes (mode0
,
1076 GET_MODE (op0
) != VOIDmode
1081 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1082 xop1
= convert_modes (mode1
,
1083 GET_MODE (op1
) != VOIDmode
1088 /* Now, if insn's predicates don't allow our operands, put them into
1091 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
1092 && mode0
!= VOIDmode
)
1093 xop0
= copy_to_mode_reg (mode0
, xop0
);
1095 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
1096 && mode1
!= VOIDmode
)
1097 xop1
= copy_to_mode_reg (mode1
, xop1
);
1099 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
1100 temp
= gen_reg_rtx (mode
);
1102 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1105 /* If PAT is composed of more than one insn, try to add an appropriate
1106 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1107 operand, call ourselves again, this time without a target. */
1108 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1109 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1111 delete_insns_since (last
);
1112 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1113 unsignedp
, methods
);
1120 delete_insns_since (last
);
1123 /* If this is a multiply, see if we can do a widening operation that
1124 takes operands of this mode and makes a wider mode. */
1126 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
1127 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1128 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1129 != CODE_FOR_nothing
))
1131 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1132 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1133 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1137 if (GET_MODE_CLASS (mode
) == MODE_INT
)
1138 return gen_lowpart (mode
, temp
);
1140 return convert_to_mode (mode
, temp
, unsignedp
);
1144 /* Look for a wider mode of the same class for which we think we
1145 can open-code the operation. Check for a widening multiply at the
1146 wider mode as well. */
1148 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1149 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1150 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1151 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1153 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1154 || (binoptab
== smul_optab
1155 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1156 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1157 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1158 != CODE_FOR_nothing
)))
1160 rtx xop0
= op0
, xop1
= op1
;
1163 /* For certain integer operations, we need not actually extend
1164 the narrow operands, as long as we will truncate
1165 the results to the same narrowness. */
1167 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1168 || binoptab
== xor_optab
1169 || binoptab
== add_optab
|| binoptab
== sub_optab
1170 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1171 && class == MODE_INT
)
1174 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1176 /* The second operand of a shift must always be extended. */
1177 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1178 no_extend
&& binoptab
!= ashl_optab
);
1180 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1181 unsignedp
, OPTAB_DIRECT
);
1184 if (class != MODE_INT
)
1187 target
= gen_reg_rtx (mode
);
1188 convert_move (target
, temp
, 0);
1192 return gen_lowpart (mode
, temp
);
1195 delete_insns_since (last
);
1199 /* These can be done a word at a time. */
1200 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1201 && class == MODE_INT
1202 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1203 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1209 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1210 won't be accurate, so use a new target. */
1211 if (target
== 0 || target
== op0
|| target
== op1
)
1212 target
= gen_reg_rtx (mode
);
1216 /* Do the actual arithmetic. */
1217 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1219 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1220 rtx x
= expand_binop (word_mode
, binoptab
,
1221 operand_subword_force (op0
, i
, mode
),
1222 operand_subword_force (op1
, i
, mode
),
1223 target_piece
, unsignedp
, next_methods
);
1228 if (target_piece
!= x
)
1229 emit_move_insn (target_piece
, x
);
1232 insns
= get_insns ();
1235 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1237 if (binoptab
->code
!= UNKNOWN
)
1239 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1240 copy_rtx (op0
), copy_rtx (op1
));
1244 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1249 /* Synthesize double word shifts from single word shifts. */
1250 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1251 || binoptab
== ashr_optab
)
1252 && class == MODE_INT
1253 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1254 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1255 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1256 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1257 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1259 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1260 enum machine_mode op1_mode
;
1262 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1263 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1264 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1266 /* Apply the truncation to constant shifts. */
1267 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1268 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1270 if (op1
== CONST0_RTX (op1_mode
))
1273 /* Make sure that this is a combination that expand_doubleword_shift
1274 can handle. See the comments there for details. */
1275 if (double_shift_mask
== 0
1276 || (shift_mask
== BITS_PER_WORD
- 1
1277 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1279 rtx insns
, equiv_value
;
1280 rtx into_target
, outof_target
;
1281 rtx into_input
, outof_input
;
1282 int left_shift
, outof_word
;
1284 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1285 won't be accurate, so use a new target. */
1286 if (target
== 0 || target
== op0
|| target
== op1
)
1287 target
= gen_reg_rtx (mode
);
1291 /* OUTOF_* is the word we are shifting bits away from, and
1292 INTO_* is the word that we are shifting bits towards, thus
1293 they differ depending on the direction of the shift and
1294 WORDS_BIG_ENDIAN. */
1296 left_shift
= binoptab
== ashl_optab
;
1297 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1299 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1300 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1302 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1303 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1305 if (expand_doubleword_shift (op1_mode
, binoptab
,
1306 outof_input
, into_input
, op1
,
1307 outof_target
, into_target
,
1308 unsignedp
, methods
, shift_mask
))
1310 insns
= get_insns ();
1313 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1314 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1321 /* Synthesize double word rotates from single word shifts. */
1322 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1323 && class == MODE_INT
1324 && GET_CODE (op1
) == CONST_INT
1325 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1326 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1327 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1329 rtx insns
, equiv_value
;
1330 rtx into_target
, outof_target
;
1331 rtx into_input
, outof_input
;
1333 int shift_count
, left_shift
, outof_word
;
1335 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1336 won't be accurate, so use a new target. Do this also if target is not
1337 a REG, first because having a register instead may open optimization
1338 opportunities, and second because if target and op0 happen to be MEMs
1339 designating the same location, we would risk clobbering it too early
1340 in the code sequence we generate below. */
1341 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1342 target
= gen_reg_rtx (mode
);
1346 shift_count
= INTVAL (op1
);
1348 /* OUTOF_* is the word we are shifting bits away from, and
1349 INTO_* is the word that we are shifting bits towards, thus
1350 they differ depending on the direction of the shift and
1351 WORDS_BIG_ENDIAN. */
1353 left_shift
= (binoptab
== rotl_optab
);
1354 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1356 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1357 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1359 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1360 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1362 if (shift_count
== BITS_PER_WORD
)
1364 /* This is just a word swap. */
1365 emit_move_insn (outof_target
, into_input
);
1366 emit_move_insn (into_target
, outof_input
);
1371 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1372 rtx first_shift_count
, second_shift_count
;
1373 optab reverse_unsigned_shift
, unsigned_shift
;
1375 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1376 ? lshr_optab
: ashl_optab
);
1378 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1379 ? ashl_optab
: lshr_optab
);
1381 if (shift_count
> BITS_PER_WORD
)
1383 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1384 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1388 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1389 second_shift_count
= GEN_INT (shift_count
);
1392 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1393 outof_input
, first_shift_count
,
1394 NULL_RTX
, unsignedp
, next_methods
);
1395 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1396 into_input
, second_shift_count
,
1397 NULL_RTX
, unsignedp
, next_methods
);
1399 if (into_temp1
!= 0 && into_temp2
!= 0)
1400 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1401 into_target
, unsignedp
, next_methods
);
1405 if (inter
!= 0 && inter
!= into_target
)
1406 emit_move_insn (into_target
, inter
);
1408 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1409 into_input
, first_shift_count
,
1410 NULL_RTX
, unsignedp
, next_methods
);
1411 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1412 outof_input
, second_shift_count
,
1413 NULL_RTX
, unsignedp
, next_methods
);
1415 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1416 inter
= expand_binop (word_mode
, ior_optab
,
1417 outof_temp1
, outof_temp2
,
1418 outof_target
, unsignedp
, next_methods
);
1420 if (inter
!= 0 && inter
!= outof_target
)
1421 emit_move_insn (outof_target
, inter
);
1424 insns
= get_insns ();
1429 if (binoptab
->code
!= UNKNOWN
)
1430 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1434 /* We can't make this a no conflict block if this is a word swap,
1435 because the word swap case fails if the input and output values
1436 are in the same register. */
1437 if (shift_count
!= BITS_PER_WORD
)
1438 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1447 /* These can be done a word at a time by propagating carries. */
1448 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1449 && class == MODE_INT
1450 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1451 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1454 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1455 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1456 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1457 rtx xop0
, xop1
, xtarget
;
1459 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1460 value is one of those, use it. Otherwise, use 1 since it is the
1461 one easiest to get. */
1462 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1463 int normalizep
= STORE_FLAG_VALUE
;
1468 /* Prepare the operands. */
1469 xop0
= force_reg (mode
, op0
);
1470 xop1
= force_reg (mode
, op1
);
1472 xtarget
= gen_reg_rtx (mode
);
1474 if (target
== 0 || !REG_P (target
))
1477 /* Indicate for flow that the entire target reg is being set. */
1479 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1481 /* Do the actual arithmetic. */
1482 for (i
= 0; i
< nwords
; i
++)
1484 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1485 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1486 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1487 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1490 /* Main add/subtract of the input operands. */
1491 x
= expand_binop (word_mode
, binoptab
,
1492 op0_piece
, op1_piece
,
1493 target_piece
, unsignedp
, next_methods
);
1499 /* Store carry from main add/subtract. */
1500 carry_out
= gen_reg_rtx (word_mode
);
1501 carry_out
= emit_store_flag_force (carry_out
,
1502 (binoptab
== add_optab
1505 word_mode
, 1, normalizep
);
1512 /* Add/subtract previous carry to main result. */
1513 newx
= expand_binop (word_mode
,
1514 normalizep
== 1 ? binoptab
: otheroptab
,
1516 NULL_RTX
, 1, next_methods
);
1520 /* Get out carry from adding/subtracting carry in. */
1521 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1522 carry_tmp
= emit_store_flag_force (carry_tmp
,
1523 (binoptab
== add_optab
1526 word_mode
, 1, normalizep
);
1528 /* Logical-ior the two poss. carry together. */
1529 carry_out
= expand_binop (word_mode
, ior_optab
,
1530 carry_out
, carry_tmp
,
1531 carry_out
, 0, next_methods
);
1535 emit_move_insn (target_piece
, newx
);
1539 if (x
!= target_piece
)
1540 emit_move_insn (target_piece
, x
);
1543 carry_in
= carry_out
;
1546 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1548 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1549 || ! rtx_equal_p (target
, xtarget
))
1551 rtx temp
= emit_move_insn (target
, xtarget
);
1553 set_unique_reg_note (temp
,
1555 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1566 delete_insns_since (last
);
1569 /* Attempt to synthesize double word multiplies using a sequence of word
1570 mode multiplications. We first attempt to generate a sequence using a
1571 more efficient unsigned widening multiply, and if that fails we then
1572 try using a signed widening multiply. */
1574 if (binoptab
== smul_optab
1575 && class == MODE_INT
1576 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1577 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1578 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1580 rtx product
= NULL_RTX
;
1582 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1583 != CODE_FOR_nothing
)
1585 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1588 delete_insns_since (last
);
1591 if (product
== NULL_RTX
1592 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1593 != CODE_FOR_nothing
)
1595 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1598 delete_insns_since (last
);
1601 if (product
!= NULL_RTX
)
1603 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1605 temp
= emit_move_insn (target
? target
: product
, product
);
1606 set_unique_reg_note (temp
,
1608 gen_rtx_fmt_ee (MULT
, mode
,
1616 /* It can't be open-coded in this mode.
1617 Use a library call if one is available and caller says that's ok. */
1619 if (binoptab
->handlers
[(int) mode
].libfunc
1620 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1624 enum machine_mode op1_mode
= mode
;
1631 op1_mode
= word_mode
;
1632 /* Specify unsigned here,
1633 since negative shift counts are meaningless. */
1634 op1x
= convert_to_mode (word_mode
, op1
, 1);
1637 if (GET_MODE (op0
) != VOIDmode
1638 && GET_MODE (op0
) != mode
)
1639 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1641 /* Pass 1 for NO_QUEUE so we don't lose any increments
1642 if the libcall is cse'd or moved. */
1643 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1644 NULL_RTX
, LCT_CONST
, mode
, 2,
1645 op0
, mode
, op1x
, op1_mode
);
1647 insns
= get_insns ();
1650 target
= gen_reg_rtx (mode
);
1651 emit_libcall_block (insns
, target
, value
,
1652 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1657 delete_insns_since (last
);
1659 /* It can't be done in this mode. Can we do it in a wider mode? */
1661 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1662 || methods
== OPTAB_MUST_WIDEN
))
1664 /* Caller says, don't even try. */
1665 delete_insns_since (entry_last
);
1669 /* Compute the value of METHODS to pass to recursive calls.
1670 Don't allow widening to be tried recursively. */
1672 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1674 /* Look for a wider mode of the same class for which it appears we can do
1677 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1679 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1680 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1682 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1683 != CODE_FOR_nothing
)
1684 || (methods
== OPTAB_LIB
1685 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1687 rtx xop0
= op0
, xop1
= op1
;
1690 /* For certain integer operations, we need not actually extend
1691 the narrow operands, as long as we will truncate
1692 the results to the same narrowness. */
1694 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1695 || binoptab
== xor_optab
1696 || binoptab
== add_optab
|| binoptab
== sub_optab
1697 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1698 && class == MODE_INT
)
1701 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1702 unsignedp
, no_extend
);
1704 /* The second operand of a shift must always be extended. */
1705 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1706 no_extend
&& binoptab
!= ashl_optab
);
1708 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1709 unsignedp
, methods
);
1712 if (class != MODE_INT
)
1715 target
= gen_reg_rtx (mode
);
1716 convert_move (target
, temp
, 0);
1720 return gen_lowpart (mode
, temp
);
1723 delete_insns_since (last
);
1728 delete_insns_since (entry_last
);
1732 /* Expand a binary operator which has both signed and unsigned forms.
1733 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1736 If we widen unsigned operands, we may use a signed wider operation instead
1737 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): this extract elides many original source lines (the
   return type, braces, the declaration of TEMP, and the early returns
   after each successful attempt), so the statements below appear
   fragmented.  Only comments have been added; code bytes are untouched.  */
1740 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1741 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1742 enum optab_methods methods
)
/* Pick whichever optab matches the requested signedness for the
   first, direct (no-widening) attempt.  */
1745 optab direct_optab
= unsignedp
? uoptab
: soptab
;
/* Local copy of *SOPTAB used to hide the direct-mode insn/libfunc so
   that a later call can only succeed by widening (see below).  */
1746 struct optab wide_soptab
;
1748 /* Do it without widening, if possible. */
1749 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1750 unsignedp
, OPTAB_DIRECT
)
;
/* Direct attempt succeeded, or the caller forbade anything beyond
   OPTAB_DIRECT -- presumably returns TEMP here (the return statement
   is elided from this extract; TODO confirm against the full source).  */
1751 if (temp
|| methods
== OPTAB_DIRECT
)
1754 /* Try widening to a signed int. Make a fake signed optab that
1755 hides any signed insn for direct use. */
1756 wide_soptab
= *soptab
;
/* Disable the same-mode insn so expand_binop must widen.  */
1757 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
/* Likewise disable the same-mode library fallback.  */
1758 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1760 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1761 unsignedp
, OPTAB_WIDEN
)
;
1763 /* For unsigned operands, try widening to an unsigned int. */
1764 if (temp
== 0 && unsignedp
)
1765 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1766 unsignedp
, OPTAB_WIDEN
)
;
/* Widening succeeded, or the caller allowed no library calls.  */
1767 if (temp
|| methods
== OPTAB_WIDEN
)
1770 /* Use the right width lib call if that exists. */
1771 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
)
;
1772 if (temp
|| methods
== OPTAB_LIB
)
1775 /* Must widen and use a lib call, use either signed or unsigned. */
1776 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1777 unsignedp
, methods
)
;
/* Last resort: retry the unsigned optab with the caller's full
   METHODS, widening permitted.  */
1781 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1782 unsignedp
, methods
)
;
1786 /* Generate code to perform an operation specified by UNOPPTAB
1787 on operand OP0, with two results to TARG0 and TARG1.
1788 We assume that the order of the operands for the instruction
1789 is TARG0, TARG1, OP0.
1791 Either TARG0 or TARG1 may be zero, but what that means is that
1792 the result is not actually wanted. We will generate it into
1793 a dummy pseudo-reg and discard it. They may not both be zero.
1795 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): intermediate original lines (return type, braces, the
   declarations of LAST/PAT/XOP0, the "return 1" successes and the
   final "return 0") are elided from this extract.  Comments only.  */
1798 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
/* MODE is taken from whichever of TARG0/TARG1 is non-null -- the two
   results share a mode.  */
1801 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1802 enum mode_class
class;
1803 enum machine_mode wider_mode
;
/* Everything emitted after ENTRY_LAST is discarded on total failure.  */
1804 rtx entry_last
= get_last_insn ();
1807 class = GET_MODE_CLASS (mode
);
/* Presumably guarded by an elided condition (e.g. flag_force_mem) --
   TODO confirm against the full source.  */
1810 op0
= force_not_mem (op0
);
/* Elided guards presumably supply dummy pseudos when TARG0/TARG1 are
   zero (per the head comment) -- TODO confirm.  */
1813 targ0
= gen_reg_rtx (mode
);
1815 targ1
= gen_reg_rtx (mode
);
1817 /* Record where to go back to if we fail. */
1818 last
= get_last_insn ();
/* First try the insn pattern for this exact mode.  */
1820 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1822 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
/* Operand 2 is the input (order is TARG0, TARG1, OP0 per the head
   comment), so its required mode comes from operand[2].  */
1823 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1827 if (GET_MODE (xop0
) != VOIDmode
1828 && GET_MODE (xop0
) != mode0
)
1829 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1831 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1832 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop0
, mode0
))
1833 xop0
= copy_to_mode_reg (mode0
, xop0
);
1835 /* We could handle this, but we should always be called with a pseudo
1836 for our targets and all insns should take them as outputs. */
1837 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1838 || ! (*insn_data
[icode
].operand
[1].predicate
) (targ1
, mode
))
/* Generate the two-output insn; elided code presumably emits PAT and
   returns 1 on success -- TODO confirm.  */
1841 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
/* Pattern generation failed: roll back to LAST and fall through.  */
1848 delete_insns_since (last
);
1851 /* It can't be done in this mode. Can we do it in a wider mode? */
1853 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1855 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1856 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1858 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1859 != CODE_FOR_nothing
)
/* Recurse in the wider mode, then narrow both results back.  */
1861 rtx t0
= gen_reg_rtx (wider_mode
);
1862 rtx t1
= gen_reg_rtx (wider_mode
);
1863 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1865 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1867 convert_move (targ0
, t0
, unsignedp
);
1868 convert_move (targ1
, t1
, unsignedp
);
/* This wider mode failed; undo its insns and try the next.  */
1872 delete_insns_since (last
);
/* Nothing worked: discard everything emitted since entry.  */
1877 delete_insns_since (entry_last
);
1881 /* Generate code to perform an operation specified by BINOPTAB
1882 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1883 We assume that the order of the operands for the instruction
1884 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1885 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1887 Either TARG0 or TARG1 may be zero, but what that means is that
1888 the result is not actually wanted. We will generate it into
1889 a dummy pseudo-reg and discard it. They may not both be zero.
1891 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): intermediate original lines (return type, braces,
   declarations of LAST/PAT, the "return 1" successes and final
   "return 0") are elided from this extract.  Comments only.  */
1894 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
/* MODE is taken from whichever result operand is non-null.  */
1897 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1898 enum mode_class
class;
1899 enum machine_mode wider_mode
;
/* Everything emitted after ENTRY_LAST is discarded on total failure.  */
1900 rtx entry_last
= get_last_insn ();
1903 class = GET_MODE_CLASS (mode
);
/* Presumably guarded by an elided condition (e.g. flag_force_mem) --
   TODO confirm against the full source.  */
1907 op0
= force_not_mem (op0
);
1908 op1
= force_not_mem (op1
);
1911 /* If we are inside an appropriately-short loop and we are optimizing,
1912 force expensive constants into a register. */
1913 if (CONSTANT_P (op0
) && optimize
1914 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1915 op0
= force_reg (mode
, op0
);
1917 if (CONSTANT_P (op1
) && optimize
1918 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1919 op1
= force_reg (mode
, op1
);
/* Elided guards presumably supply dummy pseudos when TARG0/TARG1 are
   zero (per the head comment) -- TODO confirm.  */
1922 targ0
= gen_reg_rtx (mode
);
1924 targ1
= gen_reg_rtx (mode
);
1926 /* Record where to go back to if we fail. */
1927 last
= get_last_insn ();
/* First try the insn pattern for this exact mode.  */
1929 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1931 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
/* Inputs are insn operands 1 and 2 (order is TARG0, OP0, OP1, TARG1
   per the head comment).  */
1932 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1933 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1935 rtx xop0
= op0
, xop1
= op1
;
1937 /* In case the insn wants input operands in modes different from
1938 those of the actual operands, convert the operands. It would
1939 seem that we don't need to convert CONST_INTs, but we do, so
1940 that they're properly zero-extended, sign-extended or truncated
1943 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1944 xop0
= convert_modes (mode0
,
1945 GET_MODE (op0
) != VOIDmode
1950 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1951 xop1
= convert_modes (mode1
,
1952 GET_MODE (op1
) != VOIDmode
1957 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1958 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
1959 xop0
= copy_to_mode_reg (mode0
, xop0
);
1961 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
1962 xop1
= copy_to_mode_reg (mode1
, xop1
);
1964 /* We could handle this, but we should always be called with a pseudo
1965 for our targets and all insns should take them as outputs. */
1966 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1967 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
/* Generate the two-output insn; elided code presumably emits PAT and
   returns 1 on success -- TODO confirm.  */
1970 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
/* Pattern generation failed: roll back to LAST and fall through.  */
1977 delete_insns_since (last
);
1980 /* It can't be done in this mode. Can we do it in a wider mode? */
1982 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1984 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1985 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1987 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1988 != CODE_FOR_nothing
)
/* Recurse in the wider mode, then narrow both results back.  */
1990 rtx t0
= gen_reg_rtx (wider_mode
);
1991 rtx t1
= gen_reg_rtx (wider_mode
);
1992 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1993 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
1995 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
1998 convert_move (targ0
, t0
, unsignedp
);
1999 convert_move (targ1
, t1
, unsignedp
);
/* This wider mode failed; undo its insns and try the next.  */
2003 delete_insns_since (last
);
/* Nothing worked: discard everything emitted since entry.  */
2008 delete_insns_since (entry_last
);
2012 /* Expand the two-valued library call indicated by BINOPTAB, but
2013 preserve only one of the values. If TARG0 is non-NULL, the first
2014 value is placed into TARG0; otherwise the second value is placed
2015 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2016 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2017 This routine assumes that the value returned by the library call is
2018 as if the return value was of an integral mode twice as wide as the
2019 mode of OP0. Returns 1 if the call was successful. */
/* NOTE(review): intermediate original lines (return type, braces,
   declarations of LIBVAL/INSNS, the failure "return 0"s and the final
   "return 1") are elided from this extract.  Comments only.  */
2022 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2023 rtx targ0
, rtx targ1
, enum rtx_code code
)
2025 enum machine_mode mode
;
2026 enum machine_mode libval_mode
;
2030 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2031 if (!((targ0
!= NULL_RTX
) ^ (targ1
!= NULL_RTX
)))
/* MODE is the operand mode; bail out (elided) if no library function
   is registered for it.  */
2034 mode
= GET_MODE (op0
);
2035 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2038 /* The value returned by the library function will have twice as
2039 many bits as the nominal MODE. */
2040 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
/* Emit the call; the raw double-width result lands in LIBVAL.  */
2043 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2044 NULL_RTX
, LCT_CONST
,
2048 /* Get the part of VAL containing the value that we want. */
/* Subreg offset 0 selects the first value (TARG0 case); otherwise
   skip one MODE-sized chunk to reach the second value.  */
2049 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2050 targ0
? 0 : GET_MODE_SIZE (mode
));
2051 insns
= get_insns ();
2053 /* Move the into the desired location. */
/* Wrap the call sequence in a libcall block whose REG_EQUAL note is
   (CODE OP0 OP1), so later passes can CSE it.  */
2054 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2055 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2061 /* Wrapper around expand_unop which takes an rtx code to specify
2062 the operation to perform, not an optab pointer. All other
2063 arguments are the same. */
/* NOTE(review): the return type line, braces, and an intervening line
   (presumably a gcc_assert on UNOP) are elided from this extract.  */
2065 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2066 rtx target
, int unsignedp
)
/* Map the rtx code (e.g. NEG, NOT) to its optab through the global
   code_to_optab table, then delegate to expand_unop.  */
2068 optab unop
= code_to_optab
[(int) code
];
2072 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
/* Try computing clz of a narrow value via a wider mode, as the
   formula on the next (partially elided) comment line shows:  */
2078 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* NOTE(review): return type, braces, and several lines (including the
   success/failure returns) are elided from this extract.  */
2080 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2082 enum mode_class
class = GET_MODE_CLASS (mode
);
2083 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2085 enum machine_mode wider_mode
;
/* Walk successively wider modes looking for one with a clz insn.  */
2086 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2087 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2089 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2090 != CODE_FOR_nothing
)
2092 rtx xop0
, temp
, last
;
/* Remember where to roll back to if the attempt fails.  */
2094 last
= get_last_insn ();
2097 target
= gen_reg_rtx (mode
);
/* Zero-extend the operand (unsignedp = true, no_extend = false) so
   the extra high bits are zeros that clz will count.  */
2098 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2099 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
/* Subtract the number of extra (always-zero) high bits counted by
   the wide clz: width(wide) - width(narrow).  */
2101 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2102 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2103 - GET_MODE_BITSIZE (mode
)),
2104 target
, true, OPTAB_DIRECT
);
/* Attempt failed; discard its insns before trying the next mode.  */
2106 delete_insns_since (last
);
2115 /* Try calculating (parity x) as (and (popcount x) 1), where
2116 popcount can also be done in a wider mode. */
/* NOTE(review): return type, braces, and several lines (including the
   success/failure returns and one expand_unop argument line) are
   elided from this extract.  */
2118 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2120 enum mode_class
class = GET_MODE_CLASS (mode
);
2121 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2123 enum machine_mode wider_mode
;
/* Unlike widen_clz, the search starts at MODE itself: a same-mode
   popcount insn also works.  */
2124 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2125 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2127 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2128 != CODE_FOR_nothing
)
2130 rtx xop0
, temp
, last
;
/* Remember where to roll back to if the attempt fails.  */
2132 last
= get_last_insn ();
2135 target
= gen_reg_rtx (mode
);
/* Zero-extend (unsignedp = true, no_extend = false): the extra high
   zero bits do not change the parity.  */
2136 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2137 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
/* Parity is the low bit of the population count.  */
2140 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2141 target
, true, OPTAB_DIRECT
);
/* Attempt failed; discard its insns before trying the next mode.  */
2143 delete_insns_since (last
);
2152 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2153 conditions, VAL may already be a SUBREG against which we cannot generate
2154 a further SUBREG. In this case, we expect forcing the value into a
2155 register will work around the situation. */
2158 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2159 enum machine_mode imode
)
2162 ret
= lowpart_subreg (omode
, val
, imode
);
2165 val
= force_reg (imode
, val
);
2166 ret
= lowpart_subreg (omode
, val
, imode
);
2167 gcc_assert (ret
!= NULL
);
2172 /* Expand a floating point absolute value or negation operation via a
2173 logical operation on the sign bit. */
2176 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2177 rtx op0
, rtx target
)
2179 const struct real_format
*fmt
;
2180 int bitpos
, word
, nwords
, i
;
2181 enum machine_mode imode
;
2182 HOST_WIDE_INT hi
, lo
;
2185 /* The format has to have a simple sign bit. */
2186 fmt
= REAL_MODE_FORMAT (mode
);
2190 bitpos
= fmt
->signbit_rw
;
2194 /* Don't create negative zeros if the format doesn't support them. */
2195 if (code
== NEG
&& !fmt
->has_signed_zero
)
2198 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2200 imode
= int_mode_for_mode (mode
);
2201 if (imode
== BLKmode
)
2210 if (FLOAT_WORDS_BIG_ENDIAN
)
2211 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2213 word
= bitpos
/ BITS_PER_WORD
;
2214 bitpos
= bitpos
% BITS_PER_WORD
;
2215 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2218 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2221 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2225 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2231 if (target
== 0 || target
== op0
)
2232 target
= gen_reg_rtx (mode
);
2238 for (i
= 0; i
< nwords
; ++i
)
2240 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2241 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2245 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2247 immed_double_const (lo
, hi
, imode
),
2248 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2249 if (temp
!= targ_piece
)
2250 emit_move_insn (targ_piece
, temp
);
2253 emit_move_insn (targ_piece
, op0_piece
);
2256 insns
= get_insns ();
2259 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2260 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2264 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2265 gen_lowpart (imode
, op0
),
2266 immed_double_const (lo
, hi
, imode
),
2267 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2268 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2270 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2271 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2277 /* Generate code to perform an operation specified by UNOPTAB
2278 on operand OP0, with result having machine-mode MODE.
2280 UNSIGNEDP is for the case where we have to widen the operands
2281 to perform the operation. It says to use zero-extension.
2283 If TARGET is nonzero, the value
2284 is generated there, if it is convenient to do so.
2285 In all cases an rtx is returned for the locus of the value;
2286 this may or may not be TARGET. */
2289 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2292 enum mode_class
class;
2293 enum machine_mode wider_mode
;
2295 rtx last
= get_last_insn ();
2298 class = GET_MODE_CLASS (mode
);
2301 op0
= force_not_mem (op0
);
2303 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2305 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2306 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2312 temp
= gen_reg_rtx (mode
);
2314 if (GET_MODE (xop0
) != VOIDmode
2315 && GET_MODE (xop0
) != mode0
)
2316 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2318 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2320 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2321 xop0
= copy_to_mode_reg (mode0
, xop0
);
2323 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2324 temp
= gen_reg_rtx (mode
);
2326 pat
= GEN_FCN (icode
) (temp
, xop0
);
2329 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2330 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2332 delete_insns_since (last
);
2333 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2341 delete_insns_since (last
);
2344 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2346 /* Widening clz needs special treatment. */
2347 if (unoptab
== clz_optab
)
2349 temp
= widen_clz (mode
, op0
, target
);
2356 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2357 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2358 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2360 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2364 /* For certain operations, we need not actually extend
2365 the narrow operand, as long as we will truncate the
2366 results to the same narrowness. */
2368 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2369 (unoptab
== neg_optab
2370 || unoptab
== one_cmpl_optab
)
2371 && class == MODE_INT
);
2373 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2378 if (class != MODE_INT
)
2381 target
= gen_reg_rtx (mode
);
2382 convert_move (target
, temp
, 0);
2386 return gen_lowpart (mode
, temp
);
2389 delete_insns_since (last
);
2393 /* These can be done a word at a time. */
2394 if (unoptab
== one_cmpl_optab
2395 && class == MODE_INT
2396 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2397 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2402 if (target
== 0 || target
== op0
)
2403 target
= gen_reg_rtx (mode
);
2407 /* Do the actual arithmetic. */
2408 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2410 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2411 rtx x
= expand_unop (word_mode
, unoptab
,
2412 operand_subword_force (op0
, i
, mode
),
2413 target_piece
, unsignedp
);
2415 if (target_piece
!= x
)
2416 emit_move_insn (target_piece
, x
);
2419 insns
= get_insns ();
2422 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2423 gen_rtx_fmt_e (unoptab
->code
, mode
,
2428 if (unoptab
->code
== NEG
)
2430 /* Try negating floating point values by flipping the sign bit. */
2431 if (class == MODE_FLOAT
)
2433 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2438 /* If there is no negation pattern, and we have no negative zero,
2439 try subtracting from zero. */
2440 if (!HONOR_SIGNED_ZEROS (mode
))
2442 temp
= expand_binop (mode
, (unoptab
== negv_optab
2443 ? subv_optab
: sub_optab
),
2444 CONST0_RTX (mode
), op0
, target
,
2445 unsignedp
, OPTAB_DIRECT
);
2451 /* Try calculating parity (x) as popcount (x) % 2. */
2452 if (unoptab
== parity_optab
)
2454 temp
= expand_parity (mode
, op0
, target
);
2460 /* Now try a library call in this mode. */
2461 if (unoptab
->handlers
[(int) mode
].libfunc
)
2465 enum machine_mode outmode
= mode
;
2467 /* All of these functions return small values. Thus we choose to
2468 have them return something that isn't a double-word. */
2469 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2470 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2472 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2476 /* Pass 1 for NO_QUEUE so we don't lose any increments
2477 if the libcall is cse'd or moved. */
2478 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2479 NULL_RTX
, LCT_CONST
, outmode
,
2481 insns
= get_insns ();
2484 target
= gen_reg_rtx (outmode
);
2485 emit_libcall_block (insns
, target
, value
,
2486 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2491 /* It can't be done in this mode. Can we do it in a wider mode? */
2493 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2495 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2496 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2498 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2499 != CODE_FOR_nothing
)
2500 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2504 /* For certain operations, we need not actually extend
2505 the narrow operand, as long as we will truncate the
2506 results to the same narrowness. */
2508 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2509 (unoptab
== neg_optab
2510 || unoptab
== one_cmpl_optab
)
2511 && class == MODE_INT
);
2513 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2516 /* If we are generating clz using wider mode, adjust the
2518 if (unoptab
== clz_optab
&& temp
!= 0)
2519 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2520 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2521 - GET_MODE_BITSIZE (mode
)),
2522 target
, true, OPTAB_DIRECT
);
2526 if (class != MODE_INT
)
2529 target
= gen_reg_rtx (mode
);
2530 convert_move (target
, temp
, 0);
2534 return gen_lowpart (mode
, temp
);
2537 delete_insns_since (last
);
2542 /* One final attempt at implementing negation via subtraction,
2543 this time allowing widening of the operand. */
2544 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2547 temp
= expand_binop (mode
,
2548 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2549 CONST0_RTX (mode
), op0
,
2550 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2558 /* Emit code to compute the absolute value of OP0, with result to
2559 TARGET if convenient. (TARGET may be 0.) The return value says
2560 where the result actually is to be found.
2562 MODE is the mode of the operand; the mode of the result is
2563 different but can be deduced from MODE.
2568 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2569 int result_unsignedp
)
2574 result_unsignedp
= 1;
2576 /* First try to do it with a special abs instruction. */
2577 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2582 /* For floating point modes, try clearing the sign bit. */
2583 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2585 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2590 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2591 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2592 && !HONOR_SIGNED_ZEROS (mode
))
2594 rtx last
= get_last_insn ();
2596 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2598 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2604 delete_insns_since (last
);
2607 /* If this machine has expensive jumps, we can do integer absolute
2608 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2609 where W is the width of MODE. */
2611 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2613 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2614 size_int (GET_MODE_BITSIZE (mode
) - 1),
2617 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2620 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2621 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2631 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2632 int result_unsignedp
, int safe
)
2637 result_unsignedp
= 1;
2639 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2643 /* If that does not win, use conditional jump and negate. */
2645 /* It is safe to use the target if it is the same
2646 as the source if this is also a pseudo register */
2647 if (op0
== target
&& REG_P (op0
)
2648 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2651 op1
= gen_label_rtx ();
2652 if (target
== 0 || ! safe
2653 || GET_MODE (target
) != mode
2654 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2656 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2657 target
= gen_reg_rtx (mode
);
2659 emit_move_insn (target
, op0
);
2662 /* If this mode is an integer too wide to compare properly,
2663 compare word by word. Rely on CSE to optimize constant cases. */
2664 if (GET_MODE_CLASS (mode
) == MODE_INT
2665 && ! can_compare_p (GE
, mode
, ccp_jump
))
2666 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2669 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2670 NULL_RTX
, NULL_RTX
, op1
);
2672 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2675 emit_move_insn (target
, op0
);
2681 /* A subroutine of expand_copysign, perform the copysign operation using the
2682 abs and neg primitives advertised to exist on the target. The assumption
2683 is that we have a split register file, and leaving op0 in fp registers,
2684 and not playing with subregs so much, will help the register allocator. */
2687 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2688 int bitpos
, bool op0_is_abs
)
2690 enum machine_mode imode
;
2691 HOST_WIDE_INT hi
, lo
;
2700 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2707 if (target
== NULL_RTX
)
2708 target
= copy_to_reg (op0
);
2710 emit_move_insn (target
, op0
);
2713 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2715 imode
= int_mode_for_mode (mode
);
2716 if (imode
== BLKmode
)
2718 op1
= gen_lowpart (imode
, op1
);
2723 if (FLOAT_WORDS_BIG_ENDIAN
)
2724 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2726 word
= bitpos
/ BITS_PER_WORD
;
2727 bitpos
= bitpos
% BITS_PER_WORD
;
2728 op1
= operand_subword_force (op1
, word
, mode
);
2731 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2734 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2738 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2742 op1
= expand_binop (imode
, and_optab
, op1
,
2743 immed_double_const (lo
, hi
, imode
),
2744 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2746 label
= gen_label_rtx ();
2747 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2749 if (GET_CODE (op0
) == CONST_DOUBLE
)
2750 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2752 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2754 emit_move_insn (target
, op0
);
2762 /* A subroutine of expand_copysign, perform the entire copysign operation
2763 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2764 is true if op0 is known to have its sign bit clear. */
2767 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2768 int bitpos
, bool op0_is_abs
)
2770 enum machine_mode imode
;
2771 HOST_WIDE_INT hi
, lo
;
2772 int word
, nwords
, i
;
2775 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2777 imode
= int_mode_for_mode (mode
);
2778 if (imode
== BLKmode
)
2787 if (FLOAT_WORDS_BIG_ENDIAN
)
2788 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2790 word
= bitpos
/ BITS_PER_WORD
;
2791 bitpos
= bitpos
% BITS_PER_WORD
;
2792 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2795 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2798 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2802 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2806 if (target
== 0 || target
== op0
|| target
== op1
)
2807 target
= gen_reg_rtx (mode
);
2813 for (i
= 0; i
< nwords
; ++i
)
2815 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2816 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2821 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2822 immed_double_const (~lo
, ~hi
, imode
),
2823 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2825 op1
= expand_binop (imode
, and_optab
,
2826 operand_subword_force (op1
, i
, mode
),
2827 immed_double_const (lo
, hi
, imode
),
2828 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2830 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2831 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2832 if (temp
!= targ_piece
)
2833 emit_move_insn (targ_piece
, temp
);
2836 emit_move_insn (targ_piece
, op0_piece
);
2839 insns
= get_insns ();
2842 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2846 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2847 immed_double_const (lo
, hi
, imode
),
2848 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2850 op0
= gen_lowpart (imode
, op0
);
2852 op0
= expand_binop (imode
, and_optab
, op0
,
2853 immed_double_const (~lo
, ~hi
, imode
),
2854 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2856 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2857 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2858 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2864 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2865 scalar floating point mode. Return NULL if we do not know how to
2866 expand the operation inline. */
2869 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2871 enum machine_mode mode
= GET_MODE (op0
);
2872 const struct real_format
*fmt
;
2876 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2877 gcc_assert (GET_MODE (op1
) == mode
);
2879 /* First try to do it with a special instruction. */
2880 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2881 target
, 0, OPTAB_DIRECT
);
2885 fmt
= REAL_MODE_FORMAT (mode
);
2886 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2890 if (GET_CODE (op0
) == CONST_DOUBLE
)
2892 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2893 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2897 if (fmt
->signbit_ro
>= 0
2898 && (GET_CODE (op0
) == CONST_DOUBLE
2899 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2900 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)))
2902 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2903 fmt
->signbit_ro
, op0_is_abs
);
2908 if (fmt
->signbit_rw
< 0)
2910 return expand_copysign_bit (mode
, op0
, op1
, target
,
2911 fmt
->signbit_rw
, op0_is_abs
);
2914 /* Generate an instruction whose insn-code is INSN_CODE,
2915 with two operands: an output TARGET and an input OP0.
2916 TARGET *must* be nonzero, and the output is always stored there.
2917 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2918 the value that is stored into TARGET. */
2921 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2924 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2929 /* Sign and zero extension from memory is often done specially on
2930 RISC machines, so forcing into a register here can pessimize
2932 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2933 op0
= force_not_mem (op0
);
2935 /* Now, if insn does not accept our operands, put them into pseudos. */
2937 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2938 op0
= copy_to_mode_reg (mode0
, op0
);
2940 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2941 || (flag_force_mem
&& MEM_P (temp
)))
2942 temp
= gen_reg_rtx (GET_MODE (temp
));
2944 pat
= GEN_FCN (icode
) (temp
, op0
);
2946 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2947 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2952 emit_move_insn (target
, temp
);
2955 /* Emit code to perform a series of operations on a multi-word quantity, one
2958 Such a block is preceded by a CLOBBER of the output, consists of multiple
2959 insns, each setting one word of the output, and followed by a SET copying
2960 the output to itself.
2962 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2963 note indicating that it doesn't conflict with the (also multi-word)
2964 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2967 INSNS is a block of code generated to perform the operation, not including
2968 the CLOBBER and final copy. All insns that compute intermediate values
2969 are first emitted, followed by the block as described above.
2971 TARGET, OP0, and OP1 are the output and inputs of the operations,
2972 respectively. OP1 may be zero for a unary operation.
2974 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2977 If TARGET is not a register, INSNS is simply emitted with no special
2978 processing. Likewise if anything in INSNS is not an INSN or if
2979 there is a libcall block inside INSNS.
2981 The final insn emitted is returned. */
2984 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2986 rtx prev
, next
, first
, last
, insn
;
2988 if (!REG_P (target
) || reload_in_progress
)
2989 return emit_insn (insns
);
2991 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2992 if (!NONJUMP_INSN_P (insn
)
2993 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2994 return emit_insn (insns
);
2996 /* First emit all insns that do not store into words of the output and remove
2997 these from the list. */
2998 for (insn
= insns
; insn
; insn
= next
)
3003 next
= NEXT_INSN (insn
);
3005 /* Some ports (cris) create a libcall regions at their own. We must
3006 avoid any potential nesting of LIBCALLs. */
3007 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3008 remove_note (insn
, note
);
3009 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3010 remove_note (insn
, note
);
3012 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3013 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3014 set
= PATTERN (insn
);
3015 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3017 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3018 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3020 set
= XVECEXP (PATTERN (insn
), 0, i
);
3028 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3030 if (PREV_INSN (insn
))
3031 NEXT_INSN (PREV_INSN (insn
)) = next
;
3036 PREV_INSN (next
) = PREV_INSN (insn
);
3042 prev
= get_last_insn ();
3044 /* Now write the CLOBBER of the output, followed by the setting of each
3045 of the words, followed by the final copy. */
3046 if (target
!= op0
&& target
!= op1
)
3047 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3049 for (insn
= insns
; insn
; insn
= next
)
3051 next
= NEXT_INSN (insn
);
3054 if (op1
&& REG_P (op1
))
3055 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3058 if (op0
&& REG_P (op0
))
3059 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3063 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3064 != CODE_FOR_nothing
)
3066 last
= emit_move_insn (target
, target
);
3068 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3072 last
= get_last_insn ();
3074 /* Remove any existing REG_EQUAL note from "last", or else it will
3075 be mistaken for a note referring to the full contents of the
3076 alleged libcall value when found together with the REG_RETVAL
3077 note added below. An existing note can come from an insn
3078 expansion at "last". */
3079 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3083 first
= get_insns ();
3085 first
= NEXT_INSN (prev
);
3087 /* Encapsulate the block so it gets manipulated as a unit. */
3088 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3090 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3095 /* Emit code to make a call to a constant function or a library call.
3097 INSNS is a list containing all insns emitted in the call.
3098 These insns leave the result in RESULT. Our block is to copy RESULT
3099 to TARGET, which is logically equivalent to EQUIV.
3101 We first emit any insns that set a pseudo on the assumption that these are
3102 loading constants into registers; doing so allows them to be safely cse'ed
3103 between blocks. Then we emit all the other insns in the block, followed by
3104 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3105 note with an operand of EQUIV.
3107 Moving assignments to pseudos outside of the block is done to improve
3108 the generated code, but is not required to generate correct code,
3109 hence being unable to move an assignment is not grounds for not making
3110 a libcall block. There are two reasons why it is safe to leave these
3111 insns inside the block: First, we know that these pseudos cannot be
3112 used in generated RTL outside the block since they are created for
3113 temporary purposes within the block. Second, CSE will not record the
3114 values of anything set inside a libcall block, so we know they must
3115 be dead at the end of the block.
3117 Except for the first group of insns (the ones setting pseudos), the
3118 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3121 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3123 rtx final_dest
= target
;
3124 rtx prev
, next
, first
, last
, insn
;
3126 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3127 into a MEM later. Protect the libcall block from this change. */
3128 if (! REG_P (target
) || REG_USERVAR_P (target
))
3129 target
= gen_reg_rtx (GET_MODE (target
));
3131 /* If we're using non-call exceptions, a libcall corresponding to an
3132 operation that may trap may also trap. */
3133 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3135 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3138 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3140 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3141 remove_note (insn
, note
);
3145 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3146 reg note to indicate that this call cannot throw or execute a nonlocal
3147 goto (unless there is already a REG_EH_REGION note, in which case
3149 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3152 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3155 XEXP (note
, 0) = constm1_rtx
;
3157 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3161 /* First emit all insns that set pseudos. Remove them from the list as
3162 we go. Avoid insns that set pseudos which were referenced in previous
3163 insns. These can be generated by move_by_pieces, for example,
3164 to update an address. Similarly, avoid insns that reference things
3165 set in previous insns. */
3167 for (insn
= insns
; insn
; insn
= next
)
3169 rtx set
= single_set (insn
);
3172 /* Some ports (cris) create a libcall regions at their own. We must
3173 avoid any potential nesting of LIBCALLs. */
3174 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3175 remove_note (insn
, note
);
3176 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3177 remove_note (insn
, note
);
3179 next
= NEXT_INSN (insn
);
3181 if (set
!= 0 && REG_P (SET_DEST (set
))
3182 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3184 || ((! INSN_P(insns
)
3185 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3186 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3187 && ! modified_in_p (SET_SRC (set
), insns
)
3188 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3190 if (PREV_INSN (insn
))
3191 NEXT_INSN (PREV_INSN (insn
)) = next
;
3196 PREV_INSN (next
) = PREV_INSN (insn
);
3201 /* Some ports use a loop to copy large arguments onto the stack.
3202 Don't move anything outside such a loop. */
3207 prev
= get_last_insn ();
3209 /* Write the remaining insns followed by the final copy. */
3211 for (insn
= insns
; insn
; insn
= next
)
3213 next
= NEXT_INSN (insn
);
3218 last
= emit_move_insn (target
, result
);
3219 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3220 != CODE_FOR_nothing
)
3221 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3224 /* Remove any existing REG_EQUAL note from "last", or else it will
3225 be mistaken for a note referring to the full contents of the
3226 libcall value when found together with the REG_RETVAL note added
3227 below. An existing note can come from an insn expansion at
3229 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3232 if (final_dest
!= target
)
3233 emit_move_insn (final_dest
, target
);
3236 first
= get_insns ();
3238 first
= NEXT_INSN (prev
);
3240 /* Encapsulate the block so it gets manipulated as a unit. */
3241 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3243 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3244 when the encapsulated region would not be in one basic block,
3245 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3247 bool attach_libcall_retval_notes
= true;
3248 next
= NEXT_INSN (last
);
3249 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3250 if (control_flow_insn_p (insn
))
3252 attach_libcall_retval_notes
= false;
3256 if (attach_libcall_retval_notes
)
3258 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3260 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3266 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3267 PURPOSE describes how this comparison will be used. CODE is the rtx
3268 comparison code we will be using.
3270 ??? Actually, CODE is slightly weaker than that. A target is still
3271 required to implement all of the normal bcc operations, but not
3272 required to implement all (or any) of the unordered bcc operations. */
3275 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3276 enum can_compare_purpose purpose
)
3280 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3282 if (purpose
== ccp_jump
)
3283 return bcc_gen_fctn
[(int) code
] != NULL
;
3284 else if (purpose
== ccp_store_flag
)
3285 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3287 /* There's only one cmov entry point, and it's allowed to fail. */
3290 if (purpose
== ccp_jump
3291 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3293 if (purpose
== ccp_cmov
3294 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3296 if (purpose
== ccp_store_flag
3297 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3299 mode
= GET_MODE_WIDER_MODE (mode
);
3301 while (mode
!= VOIDmode
);
3306 /* This function is called when we are going to emit a compare instruction that
3307 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3309 *PMODE is the mode of the inputs (in case they are const_int).
3310 *PUNSIGNEDP nonzero says that the operands are unsigned;
3311 this matters if they need to be widened.
3313 If they have mode BLKmode, then SIZE specifies the size of both operands.
3315 This function performs all the setup necessary so that the caller only has
3316 to emit a single comparison insn. This setup can involve doing a BLKmode
3317 comparison or emitting a library call to perform the comparison if no insn
3318 is available to handle it.
3319 The values which are passed in through pointers can be modified; the caller
3320 should perform the comparison on the modified values. */
3323 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3324 enum machine_mode
*pmode
, int *punsignedp
,
3325 enum can_compare_purpose purpose
)
3327 enum machine_mode mode
= *pmode
;
3328 rtx x
= *px
, y
= *py
;
3329 int unsignedp
= *punsignedp
;
3330 enum mode_class
class;
3332 class = GET_MODE_CLASS (mode
);
3334 /* They could both be VOIDmode if both args are immediate constants,
3335 but we should fold that at an earlier stage.
3336 With no special code here, this will call abort,
3337 reminding the programmer to implement such folding. */
3339 if (mode
!= BLKmode
&& flag_force_mem
)
3341 /* Load duplicate non-volatile operands once. */
3342 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3344 x
= force_not_mem (x
);
3349 x
= force_not_mem (x
);
3350 y
= force_not_mem (y
);
3354 /* If we are inside an appropriately-short loop and we are optimizing,
3355 force expensive constants into a register. */
3356 if (CONSTANT_P (x
) && optimize
3357 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3358 x
= force_reg (mode
, x
);
3360 if (CONSTANT_P (y
) && optimize
3361 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3362 y
= force_reg (mode
, y
);
3365 /* Abort if we have a non-canonical comparison. The RTL documentation
3366 states that canonical comparisons are required only for targets which
3368 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3372 /* Don't let both operands fail to indicate the mode. */
3373 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3374 x
= force_reg (mode
, x
);
3376 /* Handle all BLKmode compares. */
3378 if (mode
== BLKmode
)
3380 enum machine_mode cmp_mode
, result_mode
;
3381 enum insn_code cmp_code
;
3386 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3391 /* Try to use a memory block compare insn - either cmpstr
3392 or cmpmem will do. */
3393 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3394 cmp_mode
!= VOIDmode
;
3395 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3397 cmp_code
= cmpmem_optab
[cmp_mode
];
3398 if (cmp_code
== CODE_FOR_nothing
)
3399 cmp_code
= cmpstr_optab
[cmp_mode
];
3400 if (cmp_code
== CODE_FOR_nothing
)
3403 /* Must make sure the size fits the insn's mode. */
3404 if ((GET_CODE (size
) == CONST_INT
3405 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3406 || (GET_MODE_BITSIZE (GET_MODE (size
))
3407 > GET_MODE_BITSIZE (cmp_mode
)))
3410 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3411 result
= gen_reg_rtx (result_mode
);
3412 size
= convert_to_mode (cmp_mode
, size
, 1);
3413 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3417 *pmode
= result_mode
;
3421 /* Otherwise call a library function, memcmp. */
3422 libfunc
= memcmp_libfunc
;
3423 length_type
= sizetype
;
3424 result_mode
= TYPE_MODE (integer_type_node
);
3425 cmp_mode
= TYPE_MODE (length_type
);
3426 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3427 TYPE_UNSIGNED (length_type
));
3429 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3436 *pmode
= result_mode
;
3440 /* Don't allow operands to the compare to trap, as that can put the
3441 compare and branch in different basic blocks. */
3442 if (flag_non_call_exceptions
)
3445 x
= force_reg (mode
, x
);
3447 y
= force_reg (mode
, y
);
3452 if (can_compare_p (*pcomparison
, mode
, purpose
))
3455 /* Handle a lib call just for the mode we are using. */
3457 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3459 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3462 /* If we want unsigned, and this mode has a distinct unsigned
3463 comparison routine, use that. */
3464 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3465 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3467 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3468 word_mode
, 2, x
, mode
, y
, mode
);
3472 if (TARGET_LIB_INT_CMP_BIASED
)
3473 /* Integer comparison returns a result that must be compared
3474 against 1, so that even if we do an unsigned compare
3475 afterward, there is still a value that can represent the
3476 result "less than". */
3486 if (class == MODE_FLOAT
)
3487 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3493 /* Before emitting an insn with code ICODE, make sure that X, which is going
3494 to be used for operand OPNUM of the insn, is converted from mode MODE to
3495 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3496 that it is accepted by the operand predicate. Return the new value. */
3499 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3500 enum machine_mode wider_mode
, int unsignedp
)
3502 if (mode
!= wider_mode
)
3503 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3505 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3506 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3510 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3516 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3517 we can do the comparison.
3518 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3519 be NULL_RTX which indicates that only a comparison is to be generated. */
3522 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3523 enum rtx_code comparison
, int unsignedp
, rtx label
)
3525 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3526 enum mode_class
class = GET_MODE_CLASS (mode
);
3527 enum machine_mode wider_mode
= mode
;
3529 /* Try combined insns first. */
3532 enum insn_code icode
;
3533 PUT_MODE (test
, wider_mode
);
3537 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3539 if (icode
!= CODE_FOR_nothing
3540 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3542 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3543 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3544 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3549 /* Handle some compares against zero. */
3550 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3551 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3553 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3554 emit_insn (GEN_FCN (icode
) (x
));
3556 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3560 /* Handle compares for which there is a directly suitable insn. */
3562 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3563 if (icode
!= CODE_FOR_nothing
)
3565 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3566 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3567 emit_insn (GEN_FCN (icode
) (x
, y
));
3569 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3573 if (class != MODE_INT
&& class != MODE_FLOAT
3574 && class != MODE_COMPLEX_FLOAT
)
3577 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3579 while (wider_mode
!= VOIDmode
);
3584 /* Generate code to compare X with Y so that the condition codes are
3585 set and to jump to LABEL if the condition is true. If X is a
3586 constant and Y is not a constant, then the comparison is swapped to
3587 ensure that the comparison RTL has the canonical form.
3589 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3590 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3591 the proper branch condition code.
3593 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3595 MODE is the mode of the inputs (in case they are const_int).
3597 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3598 be passed unchanged to emit_cmp_insn, then potentially converted into an
3599 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3602 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3603 enum machine_mode mode
, int unsignedp
, rtx label
)
3605 rtx op0
= x
, op1
= y
;
3607 /* Swap operands and condition to ensure canonical RTL. */
3608 if (swap_commutative_operands_p (x
, y
))
3610 /* If we're not emitting a branch, this means some caller
3616 comparison
= swap_condition (comparison
);
3620 /* If OP0 is still a constant, then both X and Y must be constants. Force
3621 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3623 if (CONSTANT_P (op0
))
3624 op0
= force_reg (mode
, op0
);
3628 comparison
= unsigned_condition (comparison
);
3630 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3632 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3635 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3638 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3639 enum machine_mode mode
, int unsignedp
)
3641 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3644 /* Emit a library call comparison between floating point X and Y.
3645 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3648 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3649 enum machine_mode
*pmode
, int *punsignedp
)
3651 enum rtx_code comparison
= *pcomparison
;
3652 enum rtx_code swapped
= swap_condition (comparison
);
3653 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3656 enum machine_mode orig_mode
= GET_MODE (x
);
3657 enum machine_mode mode
;
3658 rtx value
, target
, insns
, equiv
;
3660 bool reversed_p
= false;
3662 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3664 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3667 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3670 tmp
= x
; x
= y
; y
= tmp
;
3671 comparison
= swapped
;
3675 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3676 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3678 comparison
= reversed
;
3684 if (mode
== VOIDmode
)
3687 if (mode
!= orig_mode
)
3689 x
= convert_to_mode (mode
, x
, 0);
3690 y
= convert_to_mode (mode
, y
, 0);
3693 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3694 the RTL. The allows the RTL optimizers to delete the libcall if the
3695 condition can be determined at compile-time. */
3696 if (comparison
== UNORDERED
)
3698 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3699 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3700 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3701 temp
, const_true_rtx
, equiv
);
3705 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3706 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3708 rtx true_rtx
, false_rtx
;
3713 true_rtx
= const0_rtx
;
3714 false_rtx
= const_true_rtx
;
3718 true_rtx
= const_true_rtx
;
3719 false_rtx
= const0_rtx
;
3723 true_rtx
= const1_rtx
;
3724 false_rtx
= const0_rtx
;
3728 true_rtx
= const0_rtx
;
3729 false_rtx
= constm1_rtx
;
3733 true_rtx
= constm1_rtx
;
3734 false_rtx
= const0_rtx
;
3738 true_rtx
= const0_rtx
;
3739 false_rtx
= const1_rtx
;
3745 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3746 equiv
, true_rtx
, false_rtx
);
3751 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3752 word_mode
, 2, x
, mode
, y
, mode
);
3753 insns
= get_insns ();
3756 target
= gen_reg_rtx (word_mode
);
3757 emit_libcall_block (insns
, target
, value
, equiv
);
3759 if (comparison
== UNORDERED
3760 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3761 comparison
= reversed_p
? EQ
: NE
;
3766 *pcomparison
= comparison
;
3770 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3773 emit_indirect_jump (rtx loc
)
3775 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3777 loc
= copy_to_mode_reg (Pmode
, loc
);
3779 emit_jump_insn (gen_indirect_jump (loc
));
3783 #ifdef HAVE_conditional_move
3785 /* Emit a conditional move instruction if the machine supports one for that
3786 condition and machine mode.
3788 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3789 the mode to use should they be constants. If it is VOIDmode, they cannot
3792 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3793 should be stored there. MODE is the mode to use should they be constants.
3794 If it is VOIDmode, they cannot both be constants.
3796 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3797 is not supported. */
3800 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3801 enum machine_mode cmode
, rtx op2
, rtx op3
,
3802 enum machine_mode mode
, int unsignedp
)
3804 rtx tem
, subtarget
, comparison
, insn
;
3805 enum insn_code icode
;
3806 enum rtx_code reversed
;
3808 /* If one operand is constant, make it the second one. Only do this
3809 if the other operand is not constant as well. */
3811 if (swap_commutative_operands_p (op0
, op1
))
3816 code
= swap_condition (code
);
3819 /* get_condition will prefer to generate LT and GT even if the old
3820 comparison was against zero, so undo that canonicalization here since
3821 comparisons against zero are cheaper. */
3822 if (code
== LT
&& op1
== const1_rtx
)
3823 code
= LE
, op1
= const0_rtx
;
3824 else if (code
== GT
&& op1
== constm1_rtx
)
3825 code
= GE
, op1
= const0_rtx
;
3827 if (cmode
== VOIDmode
)
3828 cmode
= GET_MODE (op0
);
3830 if (swap_commutative_operands_p (op2
, op3
)
3831 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3840 if (mode
== VOIDmode
)
3841 mode
= GET_MODE (op2
);
3843 icode
= movcc_gen_code
[mode
];
3845 if (icode
== CODE_FOR_nothing
)
3850 op2
= force_not_mem (op2
);
3851 op3
= force_not_mem (op3
);
3855 target
= gen_reg_rtx (mode
);
3859 /* If the insn doesn't accept these operands, put them in pseudos. */
3861 if (! (*insn_data
[icode
].operand
[0].predicate
)
3862 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3863 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3865 if (! (*insn_data
[icode
].operand
[2].predicate
)
3866 (op2
, insn_data
[icode
].operand
[2].mode
))
3867 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3869 if (! (*insn_data
[icode
].operand
[3].predicate
)
3870 (op3
, insn_data
[icode
].operand
[3].mode
))
3871 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3873 /* Everything should now be in the suitable form, so emit the compare insn
3874 and then the conditional move. */
3877 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3879 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3880 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3881 return NULL and let the caller figure out how best to deal with this
3883 if (GET_CODE (comparison
) != code
)
3886 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3888 /* If that failed, then give up. */
3894 if (subtarget
!= target
)
3895 convert_move (target
, subtarget
, 0);
3900 /* Return nonzero if a conditional move of mode MODE is supported.
3902 This function is for combine so it can tell whether an insn that looks
3903 like a conditional move is actually supported by the hardware. If we
3904 guess wrong we lose a bit on optimization, but that's it. */
3905 /* ??? sparc64 supports conditionally moving integers values based on fp
3906 comparisons, and vice versa. How do we handle them? */
3909 can_conditionally_move_p (enum machine_mode mode
)
3911 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3917 #endif /* HAVE_conditional_move */
3919 /* Emit a conditional addition instruction if the machine supports one for that
3920 condition and machine mode.
3922 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3923 the mode to use should they be constants. If it is VOIDmode, they cannot
3926 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3927 should be stored there. MODE is the mode to use should they be constants.
3928 If it is VOIDmode, they cannot both be constants.
3930 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3931 is not supported. */
3934 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3935 enum machine_mode cmode
, rtx op2
, rtx op3
,
3936 enum machine_mode mode
, int unsignedp
)
3938 rtx tem
, subtarget
, comparison
, insn
;
3939 enum insn_code icode
;
3940 enum rtx_code reversed
;
3942 /* If one operand is constant, make it the second one. Only do this
3943 if the other operand is not constant as well. */
3945 if (swap_commutative_operands_p (op0
, op1
))
3950 code
= swap_condition (code
);
3953 /* get_condition will prefer to generate LT and GT even if the old
3954 comparison was against zero, so undo that canonicalization here since
3955 comparisons against zero are cheaper. */
3956 if (code
== LT
&& op1
== const1_rtx
)
3957 code
= LE
, op1
= const0_rtx
;
3958 else if (code
== GT
&& op1
== constm1_rtx
)
3959 code
= GE
, op1
= const0_rtx
;
3961 if (cmode
== VOIDmode
)
3962 cmode
= GET_MODE (op0
);
3964 if (swap_commutative_operands_p (op2
, op3
)
3965 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3974 if (mode
== VOIDmode
)
3975 mode
= GET_MODE (op2
);
3977 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3979 if (icode
== CODE_FOR_nothing
)
3984 op2
= force_not_mem (op2
);
3985 op3
= force_not_mem (op3
);
3989 target
= gen_reg_rtx (mode
);
3991 /* If the insn doesn't accept these operands, put them in pseudos. */
3993 if (! (*insn_data
[icode
].operand
[0].predicate
)
3994 (target
, insn_data
[icode
].operand
[0].mode
))
3995 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3999 if (! (*insn_data
[icode
].operand
[2].predicate
)
4000 (op2
, insn_data
[icode
].operand
[2].mode
))
4001 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4003 if (! (*insn_data
[icode
].operand
[3].predicate
)
4004 (op3
, insn_data
[icode
].operand
[3].mode
))
4005 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4007 /* Everything should now be in the suitable form, so emit the compare insn
4008 and then the conditional move. */
4011 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4013 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4014 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4015 return NULL and let the caller figure out how best to deal with this
4017 if (GET_CODE (comparison
) != code
)
4020 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4022 /* If that failed, then give up. */
4028 if (subtarget
!= target
)
4029 convert_move (target
, subtarget
, 0);
4034 /* These functions attempt to generate an insn body, rather than
4035 emitting the insn, but if the gen function already emits them, we
4036 make no attempt to turn them back into naked patterns. */
4038 /* Generate and return an insn body to add Y to X. */
4041 gen_add2_insn (rtx x
, rtx y
)
4043 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4045 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4046 (x
, insn_data
[icode
].operand
[0].mode
))
4047 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4048 (x
, insn_data
[icode
].operand
[1].mode
))
4049 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4050 (y
, insn_data
[icode
].operand
[2].mode
)))
4053 return (GEN_FCN (icode
) (x
, x
, y
));
4056 /* Generate and return an insn body to add r1 and c,
4057 storing the result in r0. */
4059 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4061 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4063 if (icode
== CODE_FOR_nothing
4064 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4065 (r0
, insn_data
[icode
].operand
[0].mode
))
4066 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4067 (r1
, insn_data
[icode
].operand
[1].mode
))
4068 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4069 (c
, insn_data
[icode
].operand
[2].mode
)))
4072 return (GEN_FCN (icode
) (r0
, r1
, c
));
4076 have_add2_insn (rtx x
, rtx y
)
4080 if (GET_MODE (x
) == VOIDmode
)
4083 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4085 if (icode
== CODE_FOR_nothing
)
4088 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4089 (x
, insn_data
[icode
].operand
[0].mode
))
4090 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4091 (x
, insn_data
[icode
].operand
[1].mode
))
4092 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4093 (y
, insn_data
[icode
].operand
[2].mode
)))
4099 /* Generate and return an insn body to subtract Y from X. */
4102 gen_sub2_insn (rtx x
, rtx y
)
4104 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4106 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4107 (x
, insn_data
[icode
].operand
[0].mode
))
4108 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4109 (x
, insn_data
[icode
].operand
[1].mode
))
4110 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4111 (y
, insn_data
[icode
].operand
[2].mode
)))
4114 return (GEN_FCN (icode
) (x
, x
, y
));
4117 /* Generate and return an insn body to subtract r1 and c,
4118 storing the result in r0. */
4120 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4122 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4124 if (icode
== CODE_FOR_nothing
4125 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4126 (r0
, insn_data
[icode
].operand
[0].mode
))
4127 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4128 (r1
, insn_data
[icode
].operand
[1].mode
))
4129 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4130 (c
, insn_data
[icode
].operand
[2].mode
)))
4133 return (GEN_FCN (icode
) (r0
, r1
, c
));
4137 have_sub2_insn (rtx x
, rtx y
)
4141 if (GET_MODE (x
) == VOIDmode
)
4144 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4146 if (icode
== CODE_FOR_nothing
)
4149 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4150 (x
, insn_data
[icode
].operand
[0].mode
))
4151 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4152 (x
, insn_data
[icode
].operand
[1].mode
))
4153 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4154 (y
, insn_data
[icode
].operand
[2].mode
)))
4160 /* Generate the body of an instruction to copy Y into X.
4161 It may be a list of insns, if one insn isn't enough. */
4164 gen_move_insn (rtx x
, rtx y
)
4169 emit_move_insn_1 (x
, y
);
4175 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4176 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4177 no such operation exists, CODE_FOR_nothing will be returned. */
4180 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4184 #ifdef HAVE_ptr_extend
4186 return CODE_FOR_ptr_extend
;
4189 tab
= unsignedp
? zext_optab
: sext_optab
;
4190 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4193 /* Generate the body of an insn to extend Y (with mode MFROM)
4194 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4197 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4198 enum machine_mode mfrom
, int unsignedp
)
4200 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4201 return GEN_FCN (icode
) (x
, y
);
4204 /* can_fix_p and can_float_p say whether the target machine
4205 can directly convert a given fixed point type to
4206 a given floating point type, or vice versa.
4207 The returned value is the CODE_FOR_... value to use,
4208 or CODE_FOR_nothing if these modes cannot be directly converted.
4210 *TRUNCP_PTR is set to 1 if it is necessary to output
4211 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4213 static enum insn_code
4214 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4215 int unsignedp
, int *truncp_ptr
)
4218 enum insn_code icode
;
4220 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4221 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4222 if (icode
!= CODE_FOR_nothing
)
4228 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4229 for this to work. We need to rework the fix* and ftrunc* patterns
4230 and documentation. */
4231 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4232 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4233 if (icode
!= CODE_FOR_nothing
4234 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4241 return CODE_FOR_nothing
;
4244 static enum insn_code
4245 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4250 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4251 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4254 /* Generate code to convert FROM to floating point
4255 and store in TO. FROM must be fixed point and not VOIDmode.
4256 UNSIGNEDP nonzero means regard FROM as unsigned.
4257 Normally this is done by correcting the final value
4258 if it is negative. */
4261 expand_float (rtx to
, rtx from
, int unsignedp
)
4263 enum insn_code icode
;
4265 enum machine_mode fmode
, imode
;
4267 /* Crash now, because we won't be able to decide which mode to use. */
4268 if (GET_MODE (from
) == VOIDmode
)
4271 /* Look for an insn to do the conversion. Do it in the specified
4272 modes if possible; otherwise convert either input, output or both to
4273 wider mode. If the integer mode is wider than the mode of FROM,
4274 we can do the conversion signed even if the input is unsigned. */
4276 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4277 fmode
= GET_MODE_WIDER_MODE (fmode
))
4278 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4279 imode
= GET_MODE_WIDER_MODE (imode
))
4281 int doing_unsigned
= unsignedp
;
4283 if (fmode
!= GET_MODE (to
)
4284 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4287 icode
= can_float_p (fmode
, imode
, unsignedp
);
4288 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4289 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4291 if (icode
!= CODE_FOR_nothing
)
4293 if (imode
!= GET_MODE (from
))
4294 from
= convert_to_mode (imode
, from
, unsignedp
);
4296 if (fmode
!= GET_MODE (to
))
4297 target
= gen_reg_rtx (fmode
);
4299 emit_unop_insn (icode
, target
, from
,
4300 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4303 convert_move (to
, target
, 0);
4308 /* Unsigned integer, and no way to convert directly.
4309 Convert as signed, then conditionally adjust the result. */
4312 rtx label
= gen_label_rtx ();
4314 REAL_VALUE_TYPE offset
;
4317 from
= force_not_mem (from
);
4319 /* Look for a usable floating mode FMODE wider than the source and at
4320 least as wide as the target. Using FMODE will avoid rounding woes
4321 with unsigned values greater than the signed maximum value. */
4323 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4324 fmode
= GET_MODE_WIDER_MODE (fmode
))
4325 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4326 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4329 if (fmode
== VOIDmode
)
4331 /* There is no such mode. Pretend the target is wide enough. */
4332 fmode
= GET_MODE (to
);
4334 /* Avoid double-rounding when TO is narrower than FROM. */
4335 if ((significand_size (fmode
) + 1)
4336 < GET_MODE_BITSIZE (GET_MODE (from
)))
4339 rtx neglabel
= gen_label_rtx ();
4341 /* Don't use TARGET if it isn't a register, is a hard register,
4342 or is the wrong mode. */
4344 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4345 || GET_MODE (target
) != fmode
)
4346 target
= gen_reg_rtx (fmode
);
4348 imode
= GET_MODE (from
);
4349 do_pending_stack_adjust ();
4351 /* Test whether the sign bit is set. */
4352 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4355 /* The sign bit is not set. Convert as signed. */
4356 expand_float (target
, from
, 0);
4357 emit_jump_insn (gen_jump (label
));
4360 /* The sign bit is set.
4361 Convert to a usable (positive signed) value by shifting right
4362 one bit, while remembering if a nonzero bit was shifted
4363 out; i.e., compute (from & 1) | (from >> 1). */
4365 emit_label (neglabel
);
4366 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4367 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4368 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4370 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4372 expand_float (target
, temp
, 0);
4374 /* Multiply by 2 to undo the shift above. */
4375 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4376 target
, 0, OPTAB_LIB_WIDEN
);
4378 emit_move_insn (target
, temp
);
4380 do_pending_stack_adjust ();
4386 /* If we are about to do some arithmetic to correct for an
4387 unsigned operand, do it in a pseudo-register. */
4389 if (GET_MODE (to
) != fmode
4390 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4391 target
= gen_reg_rtx (fmode
);
4393 /* Convert as signed integer to floating. */
4394 expand_float (target
, from
, 0);
4396 /* If FROM is negative (and therefore TO is negative),
4397 correct its value by 2**bitwidth. */
4399 do_pending_stack_adjust ();
4400 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4404 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4405 temp
= expand_binop (fmode
, add_optab
, target
,
4406 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4407 target
, 0, OPTAB_LIB_WIDEN
);
4409 emit_move_insn (target
, temp
);
4411 do_pending_stack_adjust ();
4416 /* No hardware instruction available; call a library routine. */
4421 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4423 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4424 from
= convert_to_mode (SImode
, from
, unsignedp
);
4427 from
= force_not_mem (from
);
4429 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4435 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4436 GET_MODE (to
), 1, from
,
4438 insns
= get_insns ();
4441 emit_libcall_block (insns
, target
, value
,
4442 gen_rtx_FLOAT (GET_MODE (to
), from
));
4447 /* Copy result to requested destination
4448 if we have been computing in a temp location. */
4452 if (GET_MODE (target
) == GET_MODE (to
))
4453 emit_move_insn (to
, target
);
4455 convert_move (to
, target
, 0);
4459 /* Generate code to convert FROM to fixed point and store in TO. FROM
4460 must be floating point. */
4463 expand_fix (rtx to
, rtx from
, int unsignedp
)
4465 enum insn_code icode
;
4467 enum machine_mode fmode
, imode
;
4470 /* We first try to find a pair of modes, one real and one integer, at
4471 least as wide as FROM and TO, respectively, in which we can open-code
4472 this conversion. If the integer mode is wider than the mode of TO,
4473 we can do the conversion either signed or unsigned. */
4475 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4476 fmode
= GET_MODE_WIDER_MODE (fmode
))
4477 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4478 imode
= GET_MODE_WIDER_MODE (imode
))
4480 int doing_unsigned
= unsignedp
;
4482 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4483 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4484 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4486 if (icode
!= CODE_FOR_nothing
)
4488 if (fmode
!= GET_MODE (from
))
4489 from
= convert_to_mode (fmode
, from
, 0);
4493 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4494 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4498 if (imode
!= GET_MODE (to
))
4499 target
= gen_reg_rtx (imode
);
4501 emit_unop_insn (icode
, target
, from
,
4502 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4504 convert_move (to
, target
, unsignedp
);
4509 /* For an unsigned conversion, there is one more way to do it.
4510 If we have a signed conversion, we generate code that compares
4511 the real value to the largest representable positive number. If if
4512 is smaller, the conversion is done normally. Otherwise, subtract
4513 one plus the highest signed number, convert, and add it back.
4515 We only need to check all real modes, since we know we didn't find
4516 anything with a wider integer mode.
4518 This code used to extend FP value into mode wider than the destination.
4519 This is not needed. Consider, for instance conversion from SFmode
4522 The hot path trought the code is dealing with inputs smaller than 2^63
4523 and doing just the conversion, so there is no bits to lose.
4525 In the other path we know the value is positive in the range 2^63..2^64-1
4526 inclusive. (as for other imput overflow happens and result is undefined)
4527 So we know that the most important bit set in mantissa corresponds to
4528 2^63. The subtraction of 2^63 should not generate any rounding as it
4529 simply clears out that bit. The rest is trivial. */
4531 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4532 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4533 fmode
= GET_MODE_WIDER_MODE (fmode
))
4534 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4538 REAL_VALUE_TYPE offset
;
4539 rtx limit
, lab1
, lab2
, insn
;
4541 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4542 real_2expN (&offset
, bitsize
- 1);
4543 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4544 lab1
= gen_label_rtx ();
4545 lab2
= gen_label_rtx ();
4548 from
= force_not_mem (from
);
4550 if (fmode
!= GET_MODE (from
))
4551 from
= convert_to_mode (fmode
, from
, 0);
4553 /* See if we need to do the subtraction. */
4554 do_pending_stack_adjust ();
4555 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4558 /* If not, do the signed "fix" and branch around fixup code. */
4559 expand_fix (to
, from
, 0);
4560 emit_jump_insn (gen_jump (lab2
));
4563 /* Otherwise, subtract 2**(N-1), convert to signed number,
4564 then add 2**(N-1). Do the addition using XOR since this
4565 will often generate better code. */
4567 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4568 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4569 expand_fix (to
, target
, 0);
4570 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4572 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4574 to
, 1, OPTAB_LIB_WIDEN
);
4577 emit_move_insn (to
, target
);
4581 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4582 != CODE_FOR_nothing
)
4584 /* Make a place for a REG_NOTE and add it. */
4585 insn
= emit_move_insn (to
, to
);
4586 set_unique_reg_note (insn
,
4588 gen_rtx_fmt_e (UNSIGNED_FIX
,
4596 /* We can't do it with an insn, so use a library call. But first ensure
4597 that the mode of TO is at least as wide as SImode, since those are the
4598 only library calls we know about. */
4600 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4602 target
= gen_reg_rtx (SImode
);
4604 expand_fix (target
, from
, unsignedp
);
4612 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4613 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4618 from
= force_not_mem (from
);
4622 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4623 GET_MODE (to
), 1, from
,
4625 insns
= get_insns ();
4628 emit_libcall_block (insns
, target
, value
,
4629 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4630 GET_MODE (to
), from
));
4635 if (GET_MODE (to
) == GET_MODE (target
))
4636 emit_move_insn (to
, target
);
4638 convert_move (to
, target
, 0);
4642 /* Report whether we have an instruction to perform the operation
4643 specified by CODE on operands of mode MODE. */
4645 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4647 return (code_to_optab
[(int) code
] != 0
4648 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4649 != CODE_FOR_nothing
));
4652 /* Create a blank optab. */
4657 optab op
= ggc_alloc (sizeof (struct optab
));
4658 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4660 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4661 op
->handlers
[i
].libfunc
= 0;
4667 static convert_optab
4668 new_convert_optab (void)
4671 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4672 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4673 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4675 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4676 op
->handlers
[i
][j
].libfunc
= 0;
4681 /* Same, but fill in its code as CODE, and write it into the
4682 code_to_optab table. */
4684 init_optab (enum rtx_code code
)
4686 optab op
= new_optab ();
4688 code_to_optab
[(int) code
] = op
;
4692 /* Same, but fill in its code as CODE, and do _not_ write it into
4693 the code_to_optab table. */
4695 init_optabv (enum rtx_code code
)
4697 optab op
= new_optab ();
4702 /* Conversion optabs never go in the code_to_optab table. */
4703 static inline convert_optab
4704 init_convert_optab (enum rtx_code code
)
4706 convert_optab op
= new_convert_optab ();
4711 /* Initialize the libfunc fields of an entire group of entries in some
4712 optab. Each entry is set equal to a string consisting of a leading
4713 pair of underscores followed by a generic operation name followed by
4714 a mode name (downshifted to lowercase) followed by a single character
4715 representing the number of operands for the given operation (which is
4716 usually one of the characters '2', '3', or '4').
4718 OPTABLE is the table in which libfunc fields are to be initialized.
4719 FIRST_MODE is the first machine mode index in the given optab to
4721 LAST_MODE is the last machine mode index in the given optab to
4723 OPNAME is the generic (string) name of the operation.
4724 SUFFIX is the character which specifies the number of operands for
4725 the given generic operation.
4729 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4730 const char *opname
, int suffix
)
4733 unsigned opname_len
= strlen (opname
);
4735 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4736 mode
= (enum machine_mode
) ((int) mode
+ 1))
4738 const char *mname
= GET_MODE_NAME (mode
);
4739 unsigned mname_len
= strlen (mname
);
4740 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4747 for (q
= opname
; *q
; )
4749 for (q
= mname
; *q
; q
++)
4750 *p
++ = TOLOWER (*q
);
4754 optable
->handlers
[(int) mode
].libfunc
4755 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4759 /* Initialize the libfunc fields of an entire group of entries in some
4760 optab which correspond to all integer mode operations. The parameters
4761 have the same meaning as similarly named ones for the `init_libfuncs'
4762 routine. (See above). */
4765 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4767 int maxsize
= 2*BITS_PER_WORD
;
4768 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4769 maxsize
= LONG_LONG_TYPE_SIZE
;
4770 init_libfuncs (optable
, word_mode
,
4771 mode_for_size (maxsize
, MODE_INT
, 0),
4775 /* Initialize the libfunc fields of an entire group of entries in some
4776 optab which correspond to all real mode operations. The parameters
4777 have the same meaning as similarly named ones for the `init_libfuncs'
4778 routine. (See above). */
4781 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4783 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4786 /* Initialize the libfunc fields of an entire group of entries of an
4787 inter-mode-class conversion optab. The string formation rules are
4788 similar to the ones for init_libfuncs, above, but instead of having
4789 a mode name and an operand count these functions have two mode names
4790 and no operand count. */
4792 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4793 enum mode_class from_class
,
4794 enum mode_class to_class
)
4796 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4797 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4798 size_t opname_len
= strlen (opname
);
4799 size_t max_mname_len
= 0;
4801 enum machine_mode fmode
, tmode
;
4802 const char *fname
, *tname
;
4804 char *libfunc_name
, *suffix
;
4807 for (fmode
= first_from_mode
;
4809 fmode
= GET_MODE_WIDER_MODE (fmode
))
4810 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4812 for (tmode
= first_to_mode
;
4814 tmode
= GET_MODE_WIDER_MODE (tmode
))
4815 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4817 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4818 libfunc_name
[0] = '_';
4819 libfunc_name
[1] = '_';
4820 memcpy (&libfunc_name
[2], opname
, opname_len
);
4821 suffix
= libfunc_name
+ opname_len
+ 2;
4823 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4824 fmode
= GET_MODE_WIDER_MODE (fmode
))
4825 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4826 tmode
= GET_MODE_WIDER_MODE (tmode
))
4828 fname
= GET_MODE_NAME (fmode
);
4829 tname
= GET_MODE_NAME (tmode
);
4832 for (q
= fname
; *q
; p
++, q
++)
4834 for (q
= tname
; *q
; p
++, q
++)
4839 tab
->handlers
[tmode
][fmode
].libfunc
4840 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4845 /* Initialize the libfunc fields of an entire group of entries of an
4846 intra-mode-class conversion optab. The string formation rules are
4847 similar to the ones for init_libfunc, above. WIDENING says whether
4848 the optab goes from narrow to wide modes or vice versa. These functions
4849 have two mode names _and_ an operand count. */
4851 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4852 enum mode_class
class, bool widening
)
4854 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4855 size_t opname_len
= strlen (opname
);
4856 size_t max_mname_len
= 0;
4858 enum machine_mode nmode
, wmode
;
4859 const char *nname
, *wname
;
4861 char *libfunc_name
, *suffix
;
4864 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4865 nmode
= GET_MODE_WIDER_MODE (nmode
))
4866 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4868 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4869 libfunc_name
[0] = '_';
4870 libfunc_name
[1] = '_';
4871 memcpy (&libfunc_name
[2], opname
, opname_len
);
4872 suffix
= libfunc_name
+ opname_len
+ 2;
4874 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4875 nmode
= GET_MODE_WIDER_MODE (nmode
))
4876 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4877 wmode
= GET_MODE_WIDER_MODE (wmode
))
4879 nname
= GET_MODE_NAME (nmode
);
4880 wname
= GET_MODE_NAME (wmode
);
4883 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4885 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4891 tab
->handlers
[widening
? wmode
: nmode
]
4892 [widening
? nmode
: wmode
].libfunc
4893 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4900 init_one_libfunc (const char *name
)
4904 /* Create a FUNCTION_DECL that can be passed to
4905 targetm.encode_section_info. */
4906 /* ??? We don't have any type information except for this is
4907 a function. Pretend this is "int foo()". */
4908 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4909 build_function_type (integer_type_node
, NULL_TREE
));
4910 DECL_ARTIFICIAL (decl
) = 1;
4911 DECL_EXTERNAL (decl
) = 1;
4912 TREE_PUBLIC (decl
) = 1;
4914 symbol
= XEXP (DECL_RTL (decl
), 0);
4916 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4917 are the flags assigned by targetm.encode_section_info. */
4918 SYMBOL_REF_DECL (symbol
) = 0;
4923 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4924 MODE to NAME, which should be either 0 or a string constant. */
4926 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4929 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4931 optable
->handlers
[mode
].libfunc
= 0;
4934 /* Call this to reset the function entry for one conversion optab
4935 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4936 either 0 or a string constant. */
4938 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4939 enum machine_mode fmode
, const char *name
)
4942 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4944 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4947 /* Call this once to initialize the contents of the optabs
4948 appropriately for the current target machine. */
4955 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4957 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4958 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4960 #ifdef HAVE_conditional_move
4961 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4962 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4965 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4967 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4968 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4971 add_optab
= init_optab (PLUS
);
4972 addv_optab
= init_optabv (PLUS
);
4973 sub_optab
= init_optab (MINUS
);
4974 subv_optab
= init_optabv (MINUS
);
4975 smul_optab
= init_optab (MULT
);
4976 smulv_optab
= init_optabv (MULT
);
4977 smul_highpart_optab
= init_optab (UNKNOWN
);
4978 umul_highpart_optab
= init_optab (UNKNOWN
);
4979 smul_widen_optab
= init_optab (UNKNOWN
);
4980 umul_widen_optab
= init_optab (UNKNOWN
);
4981 sdiv_optab
= init_optab (DIV
);
4982 sdivv_optab
= init_optabv (DIV
);
4983 sdivmod_optab
= init_optab (UNKNOWN
);
4984 udiv_optab
= init_optab (UDIV
);
4985 udivmod_optab
= init_optab (UNKNOWN
);
4986 smod_optab
= init_optab (MOD
);
4987 umod_optab
= init_optab (UMOD
);
4988 fmod_optab
= init_optab (UNKNOWN
);
4989 drem_optab
= init_optab (UNKNOWN
);
4990 ftrunc_optab
= init_optab (UNKNOWN
);
4991 and_optab
= init_optab (AND
);
4992 ior_optab
= init_optab (IOR
);
4993 xor_optab
= init_optab (XOR
);
4994 ashl_optab
= init_optab (ASHIFT
);
4995 ashr_optab
= init_optab (ASHIFTRT
);
4996 lshr_optab
= init_optab (LSHIFTRT
);
4997 rotl_optab
= init_optab (ROTATE
);
4998 rotr_optab
= init_optab (ROTATERT
);
4999 smin_optab
= init_optab (SMIN
);
5000 smax_optab
= init_optab (SMAX
);
5001 umin_optab
= init_optab (UMIN
);
5002 umax_optab
= init_optab (UMAX
);
5003 pow_optab
= init_optab (UNKNOWN
);
5004 atan2_optab
= init_optab (UNKNOWN
);
5006 /* These three have codes assigned exclusively for the sake of
5008 mov_optab
= init_optab (SET
);
5009 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5010 cmp_optab
= init_optab (COMPARE
);
5012 ucmp_optab
= init_optab (UNKNOWN
);
5013 tst_optab
= init_optab (UNKNOWN
);
5015 eq_optab
= init_optab (EQ
);
5016 ne_optab
= init_optab (NE
);
5017 gt_optab
= init_optab (GT
);
5018 ge_optab
= init_optab (GE
);
5019 lt_optab
= init_optab (LT
);
5020 le_optab
= init_optab (LE
);
5021 unord_optab
= init_optab (UNORDERED
);
5023 neg_optab
= init_optab (NEG
);
5024 negv_optab
= init_optabv (NEG
);
5025 abs_optab
= init_optab (ABS
);
5026 absv_optab
= init_optabv (ABS
);
5027 addcc_optab
= init_optab (UNKNOWN
);
5028 one_cmpl_optab
= init_optab (NOT
);
5029 ffs_optab
= init_optab (FFS
);
5030 clz_optab
= init_optab (CLZ
);
5031 ctz_optab
= init_optab (CTZ
);
5032 popcount_optab
= init_optab (POPCOUNT
);
5033 parity_optab
= init_optab (PARITY
);
5034 sqrt_optab
= init_optab (SQRT
);
5035 floor_optab
= init_optab (UNKNOWN
);
5036 lfloor_optab
= init_optab (UNKNOWN
);
5037 ceil_optab
= init_optab (UNKNOWN
);
5038 round_optab
= init_optab (UNKNOWN
);
5039 btrunc_optab
= init_optab (UNKNOWN
);
5040 nearbyint_optab
= init_optab (UNKNOWN
);
5041 rint_optab
= init_optab (UNKNOWN
);
5042 lrint_optab
= init_optab (UNKNOWN
);
5043 sincos_optab
= init_optab (UNKNOWN
);
5044 sin_optab
= init_optab (UNKNOWN
);
5045 asin_optab
= init_optab (UNKNOWN
);
5046 cos_optab
= init_optab (UNKNOWN
);
5047 acos_optab
= init_optab (UNKNOWN
);
5048 exp_optab
= init_optab (UNKNOWN
);
5049 exp10_optab
= init_optab (UNKNOWN
);
5050 exp2_optab
= init_optab (UNKNOWN
);
5051 expm1_optab
= init_optab (UNKNOWN
);
5052 ldexp_optab
= init_optab (UNKNOWN
);
5053 logb_optab
= init_optab (UNKNOWN
);
5054 ilogb_optab
= init_optab (UNKNOWN
);
5055 log_optab
= init_optab (UNKNOWN
);
5056 log10_optab
= init_optab (UNKNOWN
);
5057 log2_optab
= init_optab (UNKNOWN
);
5058 log1p_optab
= init_optab (UNKNOWN
);
5059 tan_optab
= init_optab (UNKNOWN
);
5060 atan_optab
= init_optab (UNKNOWN
);
5061 copysign_optab
= init_optab (UNKNOWN
);
5063 strlen_optab
= init_optab (UNKNOWN
);
5064 cbranch_optab
= init_optab (UNKNOWN
);
5065 cmov_optab
= init_optab (UNKNOWN
);
5066 cstore_optab
= init_optab (UNKNOWN
);
5067 push_optab
= init_optab (UNKNOWN
);
5069 vec_extract_optab
= init_optab (UNKNOWN
);
5070 vec_set_optab
= init_optab (UNKNOWN
);
5071 vec_init_optab
= init_optab (UNKNOWN
);
5072 vec_realign_load_optab
= init_optab (UNKNOWN
);
5073 movmisalign_optab
= init_optab (UNKNOWN
);
5075 powi_optab
= init_optab (UNKNOWN
);
5078 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5079 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5080 trunc_optab
= init_convert_optab (TRUNCATE
);
5081 sfix_optab
= init_convert_optab (FIX
);
5082 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5083 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5084 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5085 sfloat_optab
= init_convert_optab (FLOAT
);
5086 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5088 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5090 movmem_optab
[i
] = CODE_FOR_nothing
;
5091 clrmem_optab
[i
] = CODE_FOR_nothing
;
5092 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5093 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5095 #ifdef HAVE_SECONDARY_RELOADS
5096 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5100 /* Fill in the optabs with the insns we support. */
5103 /* Initialize the optabs with the names of the library functions. */
5104 init_integral_libfuncs (add_optab
, "add", '3');
5105 init_floating_libfuncs (add_optab
, "add", '3');
5106 init_integral_libfuncs (addv_optab
, "addv", '3');
5107 init_floating_libfuncs (addv_optab
, "add", '3');
5108 init_integral_libfuncs (sub_optab
, "sub", '3');
5109 init_floating_libfuncs (sub_optab
, "sub", '3');
5110 init_integral_libfuncs (subv_optab
, "subv", '3');
5111 init_floating_libfuncs (subv_optab
, "sub", '3');
5112 init_integral_libfuncs (smul_optab
, "mul", '3');
5113 init_floating_libfuncs (smul_optab
, "mul", '3');
5114 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5115 init_floating_libfuncs (smulv_optab
, "mul", '3');
5116 init_integral_libfuncs (sdiv_optab
, "div", '3');
5117 init_floating_libfuncs (sdiv_optab
, "div", '3');
5118 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5119 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5120 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5121 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5122 init_integral_libfuncs (smod_optab
, "mod", '3');
5123 init_integral_libfuncs (umod_optab
, "umod", '3');
5124 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5125 init_integral_libfuncs (and_optab
, "and", '3');
5126 init_integral_libfuncs (ior_optab
, "ior", '3');
5127 init_integral_libfuncs (xor_optab
, "xor", '3');
5128 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5129 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5130 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5131 init_integral_libfuncs (smin_optab
, "min", '3');
5132 init_floating_libfuncs (smin_optab
, "min", '3');
5133 init_integral_libfuncs (smax_optab
, "max", '3');
5134 init_floating_libfuncs (smax_optab
, "max", '3');
5135 init_integral_libfuncs (umin_optab
, "umin", '3');
5136 init_integral_libfuncs (umax_optab
, "umax", '3');
5137 init_integral_libfuncs (neg_optab
, "neg", '2');
5138 init_floating_libfuncs (neg_optab
, "neg", '2');
5139 init_integral_libfuncs (negv_optab
, "negv", '2');
5140 init_floating_libfuncs (negv_optab
, "neg", '2');
5141 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5142 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5143 init_integral_libfuncs (clz_optab
, "clz", '2');
5144 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5145 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5146 init_integral_libfuncs (parity_optab
, "parity", '2');
5148 /* Comparison libcalls for integers MUST come in pairs,
5150 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5151 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5152 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5154 /* EQ etc are floating point only. */
5155 init_floating_libfuncs (eq_optab
, "eq", '2');
5156 init_floating_libfuncs (ne_optab
, "ne", '2');
5157 init_floating_libfuncs (gt_optab
, "gt", '2');
5158 init_floating_libfuncs (ge_optab
, "ge", '2');
5159 init_floating_libfuncs (lt_optab
, "lt", '2');
5160 init_floating_libfuncs (le_optab
, "le", '2');
5161 init_floating_libfuncs (unord_optab
, "unord", '2');
5163 init_floating_libfuncs (powi_optab
, "powi", '2');
5166 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5167 MODE_INT
, MODE_FLOAT
);
5168 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5169 MODE_FLOAT
, MODE_INT
);
5170 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5171 MODE_FLOAT
, MODE_INT
);
5173 /* sext_optab is also used for FLOAT_EXTEND. */
5174 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5175 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5177 /* Use cabs for double complex abs, since systems generally have cabs.
5178 Don't define any libcall for float complex, so that cabs will be used. */
5179 if (complex_double_type_node
)
5180 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5181 = init_one_libfunc ("cabs");
5183 /* The ffs function operates on `int'. */
5184 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5185 = init_one_libfunc ("ffs");
5187 abort_libfunc
= init_one_libfunc ("abort");
5188 memcpy_libfunc
= init_one_libfunc ("memcpy");
5189 memmove_libfunc
= init_one_libfunc ("memmove");
5190 memcmp_libfunc
= init_one_libfunc ("memcmp");
5191 memset_libfunc
= init_one_libfunc ("memset");
5192 setbits_libfunc
= init_one_libfunc ("__setbits");
5194 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5195 ? "_Unwind_SjLj_Resume"
5196 : "_Unwind_Resume");
5197 #ifndef DONT_USE_BUILTIN_SETJMP
5198 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5199 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5201 setjmp_libfunc
= init_one_libfunc ("setjmp");
5202 longjmp_libfunc
= init_one_libfunc ("longjmp");
5204 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5205 unwind_sjlj_unregister_libfunc
5206 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5208 /* For function entry/exit instrumentation. */
5209 profile_function_entry_libfunc
5210 = init_one_libfunc ("__cyg_profile_func_enter");
5211 profile_function_exit_libfunc
5212 = init_one_libfunc ("__cyg_profile_func_exit");
5214 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5216 if (HAVE_conditional_trap
)
5217 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5219 /* Allow the target to add more libcalls or rename some, etc. */
5220 targetm
.init_libfuncs ();
5225 /* Print information about the current contents of the optabs on
5229 debug_optab_libfuncs (void)
5235 /* Dump the arithmetic optabs. */
5236 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5237 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5240 struct optab_handlers
*h
;
5243 h
= &o
->handlers
[j
];
5246 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5248 fprintf (stderr
, "%s\t%s:\t%s\n",
5249 GET_RTX_NAME (o
->code
),
5251 XSTR (h
->libfunc
, 0));
5255 /* Dump the conversion optabs. */
5256 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
5257 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5258 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5261 struct optab_handlers
*h
;
5263 o
= &convert_optab_table
[i
];
5264 h
= &o
->handlers
[j
][k
];
5267 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5269 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5270 GET_RTX_NAME (o
->code
),
5273 XSTR (h
->libfunc
, 0));
5281 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5282 CODE. Return 0 on failure. */
5285 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5286 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5288 enum machine_mode mode
= GET_MODE (op1
);
5289 enum insn_code icode
;
5292 if (!HAVE_conditional_trap
)
5295 if (mode
== VOIDmode
)
5298 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5299 if (icode
== CODE_FOR_nothing
)
5303 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5304 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5310 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5312 PUT_CODE (trap_rtx
, code
);
5313 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5317 insn
= get_insns ();
5324 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5325 or unsigned operation code. */
5327 static enum rtx_code
5328 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5340 code
= unsignedp
? LTU
: LT
;
5343 code
= unsignedp
? LEU
: LE
;
5346 code
= unsignedp
? GTU
: GT
;
5349 code
= unsignedp
? GEU
: GE
;
5352 case UNORDERED_EXPR
:
5383 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5384 unsigned operators. Do not generate compare instruction. */
5387 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5389 enum rtx_code rcode
;
5391 rtx rtx_op0
, rtx_op1
;
5393 if (!COMPARISON_CLASS_P (cond
))
5395 /* This is unlikely. While generating VEC_COND_EXPR,
5396 auto vectorizer ensures that condition is a relational
5402 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5403 t_op0
= TREE_OPERAND (cond
, 0);
5404 t_op1
= TREE_OPERAND (cond
, 1);
5407 /* Expand operands. */
5408 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5409 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5411 if (!(*insn_data
[icode
].operand
[4].predicate
) (rtx_op0
, GET_MODE (rtx_op0
))
5412 && GET_MODE (rtx_op0
) != VOIDmode
)
5413 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5415 if (!(*insn_data
[icode
].operand
[5].predicate
) (rtx_op1
, GET_MODE (rtx_op1
))
5416 && GET_MODE (rtx_op1
) != VOIDmode
)
5417 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5419 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5422 /* Return insn code for VEC_COND_EXPR EXPR. */
5424 static inline enum insn_code
5425 get_vcond_icode (tree expr
, enum machine_mode mode
)
5427 enum insn_code icode
= CODE_FOR_nothing
;
5429 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5430 icode
= vcondu_gen_code
[mode
];
5432 icode
= vcond_gen_code
[mode
];
5436 /* Return TRUE iff, appropriate vector insns are available
5437 for vector cond expr expr in VMODE mode. */
5440 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5442 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5447 /* Generate insns for VEC_COND_EXPR. */
5450 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5452 enum insn_code icode
;
5453 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5454 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5455 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5457 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5458 if (icode
== CODE_FOR_nothing
)
5462 target
= gen_reg_rtx (mode
);
5464 /* Get comparison rtx. First expand both cond expr operands. */
5465 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5467 cc_op0
= XEXP (comparison
, 0);
5468 cc_op1
= XEXP (comparison
, 1);
5469 /* Expand both operands and force them in reg, if required. */
5470 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5471 NULL_RTX
, VOIDmode
, 1);
5472 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5473 && mode
!= VOIDmode
)
5474 rtx_op1
= force_reg (mode
, rtx_op1
);
5476 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5477 NULL_RTX
, VOIDmode
, 1);
5478 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5479 && mode
!= VOIDmode
)
5480 rtx_op2
= force_reg (mode
, rtx_op2
);
5482 /* Emit instruction! */
5483 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5484 comparison
, cc_op0
, cc_op1
));
5488 #include "gt-optabs.h"