/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];
/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode,
                          int, int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
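/* For example, after a multi-insn expansion of TARGET = OP0 + OP1 in SImode,
   the note attached to the last insn would be (plus:SI OP0 OP1), letting
   later passes treat the whole sequence as a single addition.  (Illustrative
   sketch only, not taken from the surrounding code.)  */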
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  if (! insns
      || ! INSN_P (insns)
      || NEXT_INSN (insns) == NULL_RTX)
    abort ();

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
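/* For example, when an 8-bit AND is carried out in a 32-bit register, the
   upper 24 bits of the inputs never influence the low 8 bits of the result,
   so NO_EXTEND can be nonzero; an arithmetic right shift, by contrast,
   feeds high-order bits into the result and needs a real extension.
   (Illustrative note.)  */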
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */
  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */

optab
optab_for_tree_code (enum tree_code code, tree type)
{
      return one_cmpl_optab;

      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);

      return trapv ? addv_optab : add_optab;

      return trapv ? subv_optab : sub_optab;

      return trapv ? smulv_optab : smul_optab;

      return trapv ? negv_optab : neg_optab;

      return trapv ? absv_optab : abs_optab;
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  if (ternary_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
    abort ();

  if (target == 0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (! (*insn_data[icode].operand[3].predicate) (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  if (!pat)
    return 0;
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
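/* For instance, adding (const_int 2) and (const_int 3) goes through
   simplify_gen_binary and folds directly to (const_int 5); no insns are
   emitted in that case.  (Illustrative example.)  */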
static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);
  else
    return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
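      /* For example, with BITS_PER_WORD == 32 and OP1 == 5, the reverse
         shift needed is 27 bits; it is done as a shift by 1 followed by a
         shift by (~5 & 31) == 26.  (Illustrative numbers only.)  */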
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
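/* As an illustrative sketch, on a 32-bit-word target a 64-bit left shift
   by N decomposes as

       into_target  = (into_input << N) | (outof_input >> (32 - N));
       outof_target = outof_input << N;               for N < 32, and
       into_target  = outof_input << (N - 32);
       outof_target = 0;                              for N >= 32,

   where OUTOF_* is the low word and INTO_* the high word of the value.  */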
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      superword_op1 = cmp1;
    }

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));

  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
    {
      op0 = force_not_mem (op0);
      op1 = op0;
    }
  else
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      if (((target == 0 || REG_P (target))
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
          && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
        {
          rtx tmp;

          tmp = op0; op0 = op1; op1 = tmp;
          tmp = xop0; xop0 = xop1; xop1 = tmp;
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an
             appropriate REG_EQUAL note to it.  If we can't because TEMP
             conflicts with an operand, call ourselves again, this time
             without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT)
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */
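          /* For instance, for a left shift on a little-endian, 32-bit-word
             target, OUTOF_* refers to word 0 (the low word, which bits
             leave) and INTO_* to word 1 (the high word, which bits enter).
             (Illustrative note.)  */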
          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
          else
            equiv_value = 0;

          /* We can't make this a no conflict block if this is a word swap,
             because the word swap case fails if the input and output values
             are in the same register.  */
          if (shift_count != BITS_PER_WORD)
            emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          else
            emit_insn (insns);

          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
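  /* A sketch of the idea for a double-word addition on a single-word
     machine (illustrative only; the code below also handles subtraction
     and targets whose store-flag value is -1):

         low   = op0_low + op1_low;
         carry = low < op0_low;               (unsigned comparison)
         high  = op0_high + op1_high + carry;  */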
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);

          /* Store carry from main add/subtract.  */
          carry_out = gen_reg_rtx (word_mode);
          carry_out = emit_store_flag_force (carry_out,
                                             (binoptab == add_optab
                                             word_mode, 1, normalizep);

          /* Add/subtract previous carry to main result.  */
          newx = expand_binop (word_mode,
                               normalizep == 1 ? binoptab : otheroptab,
                               NULL_RTX, 1, next_methods);

          /* Get out carry from adding/subtracting carry in.  */
          rtx carry_tmp = gen_reg_rtx (word_mode);
          carry_tmp = emit_store_flag_force (carry_tmp,
                                             (binoptab == add_optab
                                             word_mode, 1, normalizep);

          /* Logical-ior the two poss. carry together.  */
          carry_out = expand_binop (word_mode, ior_optab,
                                    carry_out, carry_tmp,
                                    carry_out, 0, next_methods);

          emit_move_insn (target_piece, newx);

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
            }
        }

      delete_insns_since (last);
    }
  /* If we want to multiply two two-word values and have normal and widening
     multiplies of single-word values, we can do this with three smaller
     multiplications.  Note that we do not make a REG_NO_CONFLICT block here
     because we are not operating on one word at a time.

     The multiplication proceeds as follows:
                         _______________________
                        [__op0_high_|__op0_low__]
                         _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________
                         _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
              _______________________
    (3)      [__op0_high_*__op1_high_]


    This gives a 4-word result.  Since we are only interested in the
    lower 2 words, partial result (3) and the upper words of (2a) and
    (2b) don't need to be calculated.  Hence (2a) and (2b) can be
    calculated using non-widening multiplication.

    (1), however, needs to be calculated with an unsigned widening
    multiplication.  If this operation is not directly supported we
    try using a signed widening multiplication and adjust the result.
    This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 1.  */
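  /* A worked example of the adjustment, assuming BITS_PER_WORD == 8 for
     brevity: op0_low = 0x90 (144 unsigned, -112 signed), op1_low = 0x05.
     The desired unsigned product is 144 * 5 = 720 = 0x02d0, but a signed
     widening multiply yields -112 * 5 = -560 = 0xfdd0.  Adding
     op1_low * 2**BITS_PER_WORD = 0x0500 gives 0x102d0, which truncates to
     the correct 0x02d0.  (Illustrative numbers only.)  */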
  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ((umul_widen_optab->handlers[(int) mode].insn_code
           != CODE_FOR_nothing)
          || (smul_widen_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing)))
    {
      int low = (WORDS_BIG_ENDIAN ? 1 : 0);
      int high = (WORDS_BIG_ENDIAN ? 0 : 1);
      rtx op0_high = operand_subword_force (op0, high, mode);
      rtx op0_low = operand_subword_force (op0, low, mode);
      rtx op1_high = operand_subword_force (op1, high, mode);
      rtx op1_low = operand_subword_force (op1, low, mode);
      rtx product = 0;
      rtx op0_xhigh = NULL_RTX;
      rtx op1_xhigh = NULL_RTX;

      /* If the target is the same as one of the inputs, don't use it.  This
         prevents problems with the REG_EQUAL note.  */
      if (target == op0 || target == op1
          || (target != 0 && !REG_P (target)))
        target = 0;

      /* Multiply the two lower words to get a double-word product.
         If unsigned widening multiplication is available, use that;
         otherwise use the signed form and compensate.  */

      if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);

          /* If we didn't succeed, delete everything we did so far.  */
          if (product == 0)
            delete_insns_since (last);
          else
            op0_xhigh = op0_high, op1_xhigh = op1_high;
        }

      if (product == 0
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);

          product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                                  target, 1, OPTAB_DIRECT);
          op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                                    NULL_RTX, 1, next_methods);
          if (op0_xhigh)
            op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
                                      op0_xhigh, op0_xhigh, 0, next_methods);
          else
            {
              op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                                        NULL_RTX, 0, next_methods);
              if (op0_xhigh)
                op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
                                          op0_xhigh, op0_xhigh, 0,
                                          next_methods);
            }

          op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                                    NULL_RTX, 1, next_methods);
          if (op1_xhigh)
            op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
                                      op1_xhigh, op1_xhigh, 0, next_methods);
          else
            {
              op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                                        NULL_RTX, 0, next_methods);
              if (op1_xhigh)
                op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
                                          op1_xhigh, op1_xhigh, 0,
                                          next_methods);
            }
        }

      /* If we have been able to directly compute the product of the
         low-order words of the operands and perform any required adjustments
         of the operands, we proceed by trying two more multiplications
         and then computing the appropriate sum.

         We have checked above that the required addition is provided.
         Full-word addition will normally always succeed, especially if
         it is provided at all, so we don't worry about its failure.  The
         multiplication may well fail, however, so we do handle that.  */

      if (product && op0_xhigh && op1_xhigh)
        {
          rtx product_high = operand_subword (product, high, 1, mode);
          rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
                                   NULL_RTX, 0, OPTAB_DIRECT);

          if (!REG_P (product_high))
            product_high = force_reg (word_mode, product_high);

          if (temp != 0)
            temp = expand_binop (word_mode, add_optab, temp, product_high,
                                 product_high, 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          if (temp != 0)
            temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
                                 NULL_RTX, 0, OPTAB_DIRECT);

          if (temp != 0)
            temp = expand_binop (word_mode, add_optab, temp,
                                 product_high, product_high,
                                 0, next_methods);

          if (temp != 0 && temp != product_high)
            emit_move_insn (product_high, temp);

          emit_move_insn (operand_subword (product, high, 1, mode), product_high);

          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
        }
      /* If we get here, we couldn't do it for some reason even though we
         originally thought we could.  Delete anything we've emitted in
         trying to do it.  */

      delete_insns_since (last);
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
        {
          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
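/* For example, an unsigned QImode division can be widened to HImode and
   carried out with a signed HImode divide: once both operands have been
   zero-extended they are non-negative, so the signed and unsigned wider
   operations agree.  (Illustrative note.)  */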
rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;

  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, methods);
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (flag_force_mem)
    op0 = force_not_mem (op0);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[2].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[1].predicate) (targ1, mode))
        abort ();

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
          || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
        abort ();

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
1991 /* Expand the two-valued library call indicated by BINOPTAB, but
1992 preserve only one of the values. If TARG0 is non-NULL, the first
1993 value is placed into TARG0; otherwise the second value is placed
1994 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
1995 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
1996 This routine assumes that the value returned by the library call is
1997 as if the return value was of an integral mode twice as wide as the
1998 mode of OP0. Returns 1 if the call was successful. */
2001 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2002 rtx targ0
, rtx targ1
, enum rtx_code code
)
2004 enum machine_mode mode
;
2005 enum machine_mode libval_mode
;
2009 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2010 if (!((targ0
!= NULL_RTX
) ^ (targ1
!= NULL_RTX
)))
2013 mode
= GET_MODE (op0
);
2014 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2017 /* The value returned by the library function will have twice as
2018 many bits as the nominal MODE. */
2019 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2022 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2023 NULL_RTX
, LCT_CONST
,
2027 /* Get the part of VAL containing the value that we want. */
2028 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2029 targ0
? 0 : GET_MODE_SIZE (mode
));
2030 insns
= get_insns ();
2032 /* Move the into the desired location. */
2033 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2034 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2040 /* Wrapper around expand_unop which takes an rtx code to specify
2041 the operation to perform, not an optab pointer. All other
2042 arguments are the same. */
2044 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2045 rtx target
, int unsignedp
)
2047 optab unop
= code_to_optab
[(int) code
];
2051 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2057 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2059 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2061 enum mode_class
class = GET_MODE_CLASS (mode
);
2062 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2064 enum machine_mode wider_mode
;
2065 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2066 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2068 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2069 != CODE_FOR_nothing
)
2071 rtx xop0
, temp
, last
;
2073 last
= get_last_insn ();
2076 target
= gen_reg_rtx (mode
);
2077 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2078 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2080 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2081 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2082 - GET_MODE_BITSIZE (mode
)),
2083 target
, true, OPTAB_DIRECT
);
2085 delete_insns_since (last
);
2094 /* Try calculating (parity x) as (and (popcount x) 1), where
2095 popcount can also be done in a wider mode. */
2097 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2099 enum mode_class
class = GET_MODE_CLASS (mode
);
2100 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2102 enum machine_mode wider_mode
;
2103 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2104 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2106 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2107 != CODE_FOR_nothing
)
2109 rtx xop0
, temp
, last
;
2111 last
= get_last_insn ();
2114 target
= gen_reg_rtx (mode
);
2115 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2116 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2119 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2120 target
, true, OPTAB_DIRECT
);
2122 delete_insns_since (last
);
2131 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2132 conditions, VAL may already be a SUBREG against which we cannot generate
2133 a further SUBREG. In this case, we expect forcing the value into a
2134 register will work around the situation. */
2137 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2138 enum machine_mode imode
)
2141 ret
= lowpart_subreg (omode
, val
, imode
);
2144 val
= force_reg (imode
, val
);
2145 ret
= lowpart_subreg (omode
, val
, imode
);
2146 gcc_assert (ret
!= NULL
);
2151 /* Expand a floating point absolute value or negation operation via a
2152 logical operation on the sign bit. */
2155 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2156 rtx op0
, rtx target
)
2158 const struct real_format
*fmt
;
2159 int bitpos
, word
, nwords
, i
;
2160 enum machine_mode imode
;
2161 HOST_WIDE_INT hi
, lo
;
2164 /* The format has to have a simple sign bit. */
2165 fmt
= REAL_MODE_FORMAT (mode
);
2169 bitpos
= fmt
->signbit
;
2173 /* Don't create negative zeros if the format doesn't support them. */
2174 if (code
== NEG
&& !fmt
->has_signed_zero
)
2177 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2179 imode
= int_mode_for_mode (mode
);
2180 if (imode
== BLKmode
)
2189 if (FLOAT_WORDS_BIG_ENDIAN
)
2190 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2192 word
= bitpos
/ BITS_PER_WORD
;
2193 bitpos
= bitpos
% BITS_PER_WORD
;
2194 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2197 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2200 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2204 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2210 if (target
== 0 || target
== op0
)
2211 target
= gen_reg_rtx (mode
);
2217 for (i
= 0; i
< nwords
; ++i
)
2219 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2220 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2224 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2226 immed_double_const (lo
, hi
, imode
),
2227 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2228 if (temp
!= targ_piece
)
2229 emit_move_insn (targ_piece
, temp
);
2232 emit_move_insn (targ_piece
, op0_piece
);
2235 insns
= get_insns ();
2238 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2239 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2243 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2244 gen_lowpart (imode
, op0
),
2245 immed_double_const (lo
, hi
, imode
),
2246 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2247 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2249 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2250 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2256 /* Generate code to perform an operation specified by UNOPTAB
2257 on operand OP0, with result having machine-mode MODE.
2259 UNSIGNEDP is for the case where we have to widen the operands
2260 to perform the operation. It says to use zero-extension.
2262 If TARGET is nonzero, the value
2263 is generated there, if it is convenient to do so.
2264 In all cases an rtx is returned for the locus of the value;
2265 this may or may not be TARGET. */
2268 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2271 enum mode_class
class;
2272 enum machine_mode wider_mode
;
2274 rtx last
= get_last_insn ();
2277 class = GET_MODE_CLASS (mode
);
2280 op0
= force_not_mem (op0
);
2282 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2284 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2285 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2291 temp
= gen_reg_rtx (mode
);
2293 if (GET_MODE (xop0
) != VOIDmode
2294 && GET_MODE (xop0
) != mode0
)
2295 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2297 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2299 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2300 xop0
= copy_to_mode_reg (mode0
, xop0
);
2302 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2303 temp
= gen_reg_rtx (mode
);
2305 pat
= GEN_FCN (icode
) (temp
, xop0
);
2308 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2309 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2311 delete_insns_since (last
);
2312 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2320 delete_insns_since (last
);
2323 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2325 /* Widening clz needs special treatment. */
2326 if (unoptab
== clz_optab
)
2328 temp
= widen_clz (mode
, op0
, target
);
2335 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2336 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2337 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2339 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2343 /* For certain operations, we need not actually extend
2344 the narrow operand, as long as we will truncate the
2345 results to the same narrowness. */
2347 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2348 (unoptab
== neg_optab
2349 || unoptab
== one_cmpl_optab
)
2350 && class == MODE_INT
);
2352 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2357 if (class != MODE_INT
)
2360 target
= gen_reg_rtx (mode
);
2361 convert_move (target
, temp
, 0);
2365 return gen_lowpart (mode
, temp
);
2368 delete_insns_since (last
);
2372 /* These can be done a word at a time. */
2373 if (unoptab
== one_cmpl_optab
2374 && class == MODE_INT
2375 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2376 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2381 if (target
== 0 || target
== op0
)
2382 target
= gen_reg_rtx (mode
);
2386 /* Do the actual arithmetic. */
2387 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2389 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2390 rtx x
= expand_unop (word_mode
, unoptab
,
2391 operand_subword_force (op0
, i
, mode
),
2392 target_piece
, unsignedp
);
2394 if (target_piece
!= x
)
2395 emit_move_insn (target_piece
, x
);
2398 insns
= get_insns ();
2401 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2402 gen_rtx_fmt_e (unoptab
->code
, mode
,
2407 if (unoptab
->code
== NEG
)
2409 /* Try negating floating point values by flipping the sign bit. */
2410 if (class == MODE_FLOAT
)
2412 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2417 /* If there is no negation pattern, and we have no negative zero,
2418 try subtracting from zero. */
2419 if (!HONOR_SIGNED_ZEROS (mode
))
2421 temp
= expand_binop (mode
, (unoptab
== negv_optab
2422 ? subv_optab
: sub_optab
),
2423 CONST0_RTX (mode
), op0
, target
,
2424 unsignedp
, OPTAB_DIRECT
);
2430 /* Try calculating parity (x) as popcount (x) % 2. */
2431 if (unoptab
== parity_optab
)
2433 temp
= expand_parity (mode
, op0
, target
);
2439 /* Now try a library call in this mode. */
2440 if (unoptab
->handlers
[(int) mode
].libfunc
)
2444 enum machine_mode outmode
= mode
;
2446 /* All of these functions return small values. Thus we choose to
2447 have them return something that isn't a double-word. */
2448 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2449 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2451 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2455 /* Pass 1 for NO_QUEUE so we don't lose any increments
2456 if the libcall is cse'd or moved. */
2457 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2458 NULL_RTX
, LCT_CONST
, outmode
,
2460 insns
= get_insns ();
2463 target
= gen_reg_rtx (outmode
);
2464 emit_libcall_block (insns
, target
, value
,
2465 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2470 /* It can't be done in this mode. Can we do it in a wider mode? */
2472 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2474 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2475 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2477 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2478 != CODE_FOR_nothing
)
2479 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2483 /* For certain operations, we need not actually extend
2484 the narrow operand, as long as we will truncate the
2485 results to the same narrowness. */
2487 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2488 (unoptab
== neg_optab
2489 || unoptab
== one_cmpl_optab
)
2490 && class == MODE_INT
);
2492 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2495 /* If we are generating clz using wider mode, adjust the
2497 if (unoptab
== clz_optab
&& temp
!= 0)
2498 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2499 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2500 - GET_MODE_BITSIZE (mode
)),
2501 target
, true, OPTAB_DIRECT
);
2505 if (class != MODE_INT
)
2508 target
= gen_reg_rtx (mode
);
2509 convert_move (target
, temp
, 0);
2513 return gen_lowpart (mode
, temp
);
2516 delete_insns_since (last
);
2521 /* One final attempt at implementing negation via subtraction,
2522 this time allowing widening of the operand. */
2523 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2526 temp
= expand_binop (mode
,
2527 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2528 CONST0_RTX (mode
), op0
,
2529 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2537 /* Emit code to compute the absolute value of OP0, with result to
2538 TARGET if convenient. (TARGET may be 0.) The return value says
2539 where the result actually is to be found.
2541 MODE is the mode of the operand; the mode of the result is
2542 different but can be deduced from MODE.
2547 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2548 int result_unsignedp
)
2553 result_unsignedp
= 1;
2555 /* First try to do it with a special abs instruction. */
2556 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2561 /* For floating point modes, try clearing the sign bit. */
2562 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2564 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2569 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2570 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2571 && !HONOR_SIGNED_ZEROS (mode
))
2573 rtx last
= get_last_insn ();
2575 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2577 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2583 delete_insns_since (last
);
2586 /* If this machine has expensive jumps, we can do integer absolute
2587 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2588 where W is the width of MODE. */
2590 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2592 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2593 size_int (GET_MODE_BITSIZE (mode
) - 1),
2596 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2599 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2600 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2610 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2611 int result_unsignedp
, int safe
)
2616 result_unsignedp
= 1;
2618 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2622 /* If that does not win, use conditional jump and negate. */
2624 /* It is safe to use the target if it is the same
2625 as the source if this is also a pseudo register */
2626 if (op0
== target
&& REG_P (op0
)
2627 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2630 op1
= gen_label_rtx ();
2631 if (target
== 0 || ! safe
2632 || GET_MODE (target
) != mode
2633 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2635 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2636 target
= gen_reg_rtx (mode
);
2638 emit_move_insn (target
, op0
);
2641 /* If this mode is an integer too wide to compare properly,
2642 compare word by word. Rely on CSE to optimize constant cases. */
2643 if (GET_MODE_CLASS (mode
) == MODE_INT
2644 && ! can_compare_p (GE
, mode
, ccp_jump
))
2645 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2648 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2649 NULL_RTX
, NULL_RTX
, op1
);
2651 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2654 emit_move_insn (target
, op0
);
2660 /* A subroutine of expand_copysign, perform the copysign operation using the
2661 abs and neg primitives advertised to exist on the target. The assumption
2662 is that we have a split register file, and leaving op0 in fp registers,
2663 and not playing with subregs so much, will help the register allocator. */
2666 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2667 int bitpos
, bool op0_is_abs
)
2669 enum machine_mode imode
;
2670 HOST_WIDE_INT hi
, lo
;
2679 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2686 if (target
== NULL_RTX
)
2687 target
= copy_to_reg (op0
);
2689 emit_move_insn (target
, op0
);
2692 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2694 imode
= int_mode_for_mode (mode
);
2695 if (imode
== BLKmode
)
2697 op1
= gen_lowpart (imode
, op1
);
2702 if (FLOAT_WORDS_BIG_ENDIAN
)
2703 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2705 word
= bitpos
/ BITS_PER_WORD
;
2706 bitpos
= bitpos
% BITS_PER_WORD
;
2707 op1
= operand_subword_force (op1
, word
, mode
);
2710 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2713 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2717 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2721 op1
= expand_binop (imode
, and_optab
, op1
,
2722 immed_double_const (lo
, hi
, imode
),
2723 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2725 label
= gen_label_rtx ();
2726 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2728 if (GET_CODE (op0
) == CONST_DOUBLE
)
2729 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2731 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2733 emit_move_insn (target
, op0
);
2741 /* A subroutine of expand_copysign, perform the entire copysign operation
2742 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2743 is true if op0 is known to have its sign bit clear. */
2746 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2747 int bitpos
, bool op0_is_abs
)
2749 enum machine_mode imode
;
2750 HOST_WIDE_INT hi
, lo
;
2751 int word
, nwords
, i
;
2754 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2756 imode
= int_mode_for_mode (mode
);
2757 if (imode
== BLKmode
)
2766 if (FLOAT_WORDS_BIG_ENDIAN
)
2767 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2769 word
= bitpos
/ BITS_PER_WORD
;
2770 bitpos
= bitpos
% BITS_PER_WORD
;
2771 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2774 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2777 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2781 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2785 if (target
== 0 || target
== op0
|| target
== op1
)
2786 target
= gen_reg_rtx (mode
);
2792 for (i
= 0; i
< nwords
; ++i
)
2794 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2795 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2800 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2801 immed_double_const (~lo
, ~hi
, imode
),
2802 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2804 op1
= expand_binop (imode
, and_optab
,
2805 operand_subword_force (op1
, i
, mode
),
2806 immed_double_const (lo
, hi
, imode
),
2807 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2809 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2810 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2811 if (temp
!= targ_piece
)
2812 emit_move_insn (targ_piece
, temp
);
2815 emit_move_insn (targ_piece
, op0_piece
);
2818 insns
= get_insns ();
2821 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2825 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2826 immed_double_const (lo
, hi
, imode
),
2827 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2829 op0
= gen_lowpart (imode
, op0
);
2831 op0
= expand_binop (imode
, and_optab
, op0
,
2832 immed_double_const (~lo
, ~hi
, imode
),
2833 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2835 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2836 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2837 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2843 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2844 scalar floating point mode. Return NULL if we do not know how to
2845 expand the operation inline. */
2848 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2850 enum machine_mode mode
= GET_MODE (op0
);
2851 const struct real_format
*fmt
;
2856 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2857 gcc_assert (GET_MODE (op1
) == mode
);
2859 /* First try to do it with a special instruction. */
2860 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2861 target
, 0, OPTAB_DIRECT
);
2865 fmt
= REAL_MODE_FORMAT (mode
);
2866 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2869 bitpos
= fmt
->signbit
;
2874 if (GET_CODE (op0
) == CONST_DOUBLE
)
2876 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2877 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2881 if (GET_CODE (op0
) == CONST_DOUBLE
2882 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2883 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
))
2885 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2886 bitpos
, op0_is_abs
);
2891 return expand_copysign_bit (mode
, op0
, op1
, target
, bitpos
, op0_is_abs
);
2894 /* Generate an instruction whose insn-code is INSN_CODE,
2895 with two operands: an output TARGET and an input OP0.
2896 TARGET *must* be nonzero, and the output is always stored there.
2897 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2898 the value that is stored into TARGET. */
2901 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2904 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2909 /* Sign and zero extension from memory is often done specially on
2910 RISC machines, so forcing into a register here can pessimize
2912 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2913 op0
= force_not_mem (op0
);
2915 /* Now, if insn does not accept our operands, put them into pseudos. */
2917 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2918 op0
= copy_to_mode_reg (mode0
, op0
);
2920 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2921 || (flag_force_mem
&& MEM_P (temp
)))
2922 temp
= gen_reg_rtx (GET_MODE (temp
));
2924 pat
= GEN_FCN (icode
) (temp
, op0
);
2926 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2927 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2932 emit_move_insn (target
, temp
);
2935 /* Emit code to perform a series of operations on a multi-word quantity, one
2938 Such a block is preceded by a CLOBBER of the output, consists of multiple
2939 insns, each setting one word of the output, and followed by a SET copying
2940 the output to itself.
2942 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2943 note indicating that it doesn't conflict with the (also multi-word)
2944 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2947 INSNS is a block of code generated to perform the operation, not including
2948 the CLOBBER and final copy. All insns that compute intermediate values
2949 are first emitted, followed by the block as described above.
2951 TARGET, OP0, and OP1 are the output and inputs of the operations,
2952 respectively. OP1 may be zero for a unary operation.
2954 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2957 If TARGET is not a register, INSNS is simply emitted with no special
2958 processing. Likewise if anything in INSNS is not an INSN or if
2959 there is a libcall block inside INSNS.
2961 The final insn emitted is returned. */
2964 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2966 rtx prev
, next
, first
, last
, insn
;
2968 if (!REG_P (target
) || reload_in_progress
)
2969 return emit_insn (insns
);
2971 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2972 if (!NONJUMP_INSN_P (insn
)
2973 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2974 return emit_insn (insns
);
2976 /* First emit all insns that do not store into words of the output and remove
2977 these from the list. */
2978 for (insn
= insns
; insn
; insn
= next
)
2983 next
= NEXT_INSN (insn
);
2985 /* Some ports (cris) create a libcall regions at their own. We must
2986 avoid any potential nesting of LIBCALLs. */
2987 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
2988 remove_note (insn
, note
);
2989 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
2990 remove_note (insn
, note
);
2992 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
2993 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
2994 set
= PATTERN (insn
);
2995 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2997 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2998 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3000 set
= XVECEXP (PATTERN (insn
), 0, i
);
3008 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3010 if (PREV_INSN (insn
))
3011 NEXT_INSN (PREV_INSN (insn
)) = next
;
3016 PREV_INSN (next
) = PREV_INSN (insn
);
3022 prev
= get_last_insn ();
3024 /* Now write the CLOBBER of the output, followed by the setting of each
3025 of the words, followed by the final copy. */
3026 if (target
!= op0
&& target
!= op1
)
3027 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3029 for (insn
= insns
; insn
; insn
= next
)
3031 next
= NEXT_INSN (insn
);
3034 if (op1
&& REG_P (op1
))
3035 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3038 if (op0
&& REG_P (op0
))
3039 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3043 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3044 != CODE_FOR_nothing
)
3046 last
= emit_move_insn (target
, target
);
3048 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3052 last
= get_last_insn ();
3054 /* Remove any existing REG_EQUAL note from "last", or else it will
3055 be mistaken for a note referring to the full contents of the
3056 alleged libcall value when found together with the REG_RETVAL
3057 note added below. An existing note can come from an insn
3058 expansion at "last". */
3059 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3063 first
= get_insns ();
3065 first
= NEXT_INSN (prev
);
3067 /* Encapsulate the block so it gets manipulated as a unit. */
3068 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3070 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3075 /* Emit code to make a call to a constant function or a library call.
3077 INSNS is a list containing all insns emitted in the call.
3078 These insns leave the result in RESULT. Our block is to copy RESULT
3079 to TARGET, which is logically equivalent to EQUIV.
3081 We first emit any insns that set a pseudo on the assumption that these are
3082 loading constants into registers; doing so allows them to be safely cse'ed
3083 between blocks. Then we emit all the other insns in the block, followed by
3084 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3085 note with an operand of EQUIV.
3087 Moving assignments to pseudos outside of the block is done to improve
3088 the generated code, but is not required to generate correct code,
3089 hence being unable to move an assignment is not grounds for not making
3090 a libcall block. There are two reasons why it is safe to leave these
3091 insns inside the block: First, we know that these pseudos cannot be
3092 used in generated RTL outside the block since they are created for
3093 temporary purposes within the block. Second, CSE will not record the
3094 values of anything set inside a libcall block, so we know they must
3095 be dead at the end of the block.
3097 Except for the first group of insns (the ones setting pseudos), the
3098 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3101 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3103 rtx final_dest
= target
;
3104 rtx prev
, next
, first
, last
, insn
;
3106 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3107 into a MEM later. Protect the libcall block from this change. */
3108 if (! REG_P (target
) || REG_USERVAR_P (target
))
3109 target
= gen_reg_rtx (GET_MODE (target
));
3111 /* If we're using non-call exceptions, a libcall corresponding to an
3112 operation that may trap may also trap. */
3113 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3115 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3118 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3120 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3121 remove_note (insn
, note
);
3125 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3126 reg note to indicate that this call cannot throw or execute a nonlocal
3127 goto (unless there is already a REG_EH_REGION note, in which case
3129 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3132 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3135 XEXP (note
, 0) = constm1_rtx
;
3137 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3141 /* First emit all insns that set pseudos. Remove them from the list as
3142 we go. Avoid insns that set pseudos which were referenced in previous
3143 insns. These can be generated by move_by_pieces, for example,
3144 to update an address. Similarly, avoid insns that reference things
3145 set in previous insns. */
3147 for (insn
= insns
; insn
; insn
= next
)
3149 rtx set
= single_set (insn
);
3152 /* Some ports (cris) create a libcall regions at their own. We must
3153 avoid any potential nesting of LIBCALLs. */
3154 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3155 remove_note (insn
, note
);
3156 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3157 remove_note (insn
, note
);
3159 next
= NEXT_INSN (insn
);
3161 if (set
!= 0 && REG_P (SET_DEST (set
))
3162 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3164 || ((! INSN_P(insns
)
3165 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3166 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3167 && ! modified_in_p (SET_SRC (set
), insns
)
3168 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3170 if (PREV_INSN (insn
))
3171 NEXT_INSN (PREV_INSN (insn
)) = next
;
3176 PREV_INSN (next
) = PREV_INSN (insn
);
3181 /* Some ports use a loop to copy large arguments onto the stack.
3182 Don't move anything outside such a loop. */
3187 prev
= get_last_insn ();
3189 /* Write the remaining insns followed by the final copy. */
3191 for (insn
= insns
; insn
; insn
= next
)
3193 next
= NEXT_INSN (insn
);
3198 last
= emit_move_insn (target
, result
);
3199 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3200 != CODE_FOR_nothing
)
3201 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3204 /* Remove any existing REG_EQUAL note from "last", or else it will
3205 be mistaken for a note referring to the full contents of the
3206 libcall value when found together with the REG_RETVAL note added
3207 below. An existing note can come from an insn expansion at
3209 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3212 if (final_dest
!= target
)
3213 emit_move_insn (final_dest
, target
);
3216 first
= get_insns ();
3218 first
= NEXT_INSN (prev
);
3220 /* Encapsulate the block so it gets manipulated as a unit. */
3221 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3223 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3224 when the encapsulated region would not be in one basic block,
3225 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3227 bool attach_libcall_retval_notes
= true;
3228 next
= NEXT_INSN (last
);
3229 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3230 if (control_flow_insn_p (insn
))
3232 attach_libcall_retval_notes
= false;
3236 if (attach_libcall_retval_notes
)
3238 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3240 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3246 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3247 PURPOSE describes how this comparison will be used. CODE is the rtx
3248 comparison code we will be using.
3250 ??? Actually, CODE is slightly weaker than that. A target is still
3251 required to implement all of the normal bcc operations, but not
3252 required to implement all (or any) of the unordered bcc operations. */
3255 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3256 enum can_compare_purpose purpose
)
3260 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3262 if (purpose
== ccp_jump
)
3263 return bcc_gen_fctn
[(int) code
] != NULL
;
3264 else if (purpose
== ccp_store_flag
)
3265 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3267 /* There's only one cmov entry point, and it's allowed to fail. */
3270 if (purpose
== ccp_jump
3271 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3273 if (purpose
== ccp_cmov
3274 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3276 if (purpose
== ccp_store_flag
3277 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3279 mode
= GET_MODE_WIDER_MODE (mode
);
3281 while (mode
!= VOIDmode
);
3286 /* This function is called when we are going to emit a compare instruction that
3287 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3289 *PMODE is the mode of the inputs (in case they are const_int).
3290 *PUNSIGNEDP nonzero says that the operands are unsigned;
3291 this matters if they need to be widened.
3293 If they have mode BLKmode, then SIZE specifies the size of both operands.
3295 This function performs all the setup necessary so that the caller only has
3296 to emit a single comparison insn. This setup can involve doing a BLKmode
3297 comparison or emitting a library call to perform the comparison if no insn
3298 is available to handle it.
3299 The values which are passed in through pointers can be modified; the caller
3300 should perform the comparison on the modified values. */
3303 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3304 enum machine_mode
*pmode
, int *punsignedp
,
3305 enum can_compare_purpose purpose
)
3307 enum machine_mode mode
= *pmode
;
3308 rtx x
= *px
, y
= *py
;
3309 int unsignedp
= *punsignedp
;
3310 enum mode_class
class;
3312 class = GET_MODE_CLASS (mode
);
3314 /* They could both be VOIDmode if both args are immediate constants,
3315 but we should fold that at an earlier stage.
3316 With no special code here, this will call abort,
3317 reminding the programmer to implement such folding. */
3319 if (mode
!= BLKmode
&& flag_force_mem
)
3321 /* Load duplicate non-volatile operands once. */
3322 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3324 x
= force_not_mem (x
);
3329 x
= force_not_mem (x
);
3330 y
= force_not_mem (y
);
3334 /* If we are inside an appropriately-short loop and we are optimizing,
3335 force expensive constants into a register. */
3336 if (CONSTANT_P (x
) && optimize
3337 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3338 x
= force_reg (mode
, x
);
3340 if (CONSTANT_P (y
) && optimize
3341 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3342 y
= force_reg (mode
, y
);
3345 /* Abort if we have a non-canonical comparison. The RTL documentation
3346 states that canonical comparisons are required only for targets which
3348 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3352 /* Don't let both operands fail to indicate the mode. */
3353 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3354 x
= force_reg (mode
, x
);
3356 /* Handle all BLKmode compares. */
3358 if (mode
== BLKmode
)
3360 enum machine_mode cmp_mode
, result_mode
;
3361 enum insn_code cmp_code
;
3366 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3371 /* Try to use a memory block compare insn - either cmpstr
3372 or cmpmem will do. */
3373 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3374 cmp_mode
!= VOIDmode
;
3375 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3377 cmp_code
= cmpmem_optab
[cmp_mode
];
3378 if (cmp_code
== CODE_FOR_nothing
)
3379 cmp_code
= cmpstr_optab
[cmp_mode
];
3380 if (cmp_code
== CODE_FOR_nothing
)
3383 /* Must make sure the size fits the insn's mode. */
3384 if ((GET_CODE (size
) == CONST_INT
3385 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3386 || (GET_MODE_BITSIZE (GET_MODE (size
))
3387 > GET_MODE_BITSIZE (cmp_mode
)))
3390 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3391 result
= gen_reg_rtx (result_mode
);
3392 size
= convert_to_mode (cmp_mode
, size
, 1);
3393 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3397 *pmode
= result_mode
;
3401 /* Otherwise call a library function, memcmp. */
3402 libfunc
= memcmp_libfunc
;
3403 length_type
= sizetype
;
3404 result_mode
= TYPE_MODE (integer_type_node
);
3405 cmp_mode
= TYPE_MODE (length_type
);
3406 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3407 TYPE_UNSIGNED (length_type
));
3409 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3416 *pmode
= result_mode
;
3420 /* Don't allow operands to the compare to trap, as that can put the
3421 compare and branch in different basic blocks. */
3422 if (flag_non_call_exceptions
)
3425 x
= force_reg (mode
, x
);
3427 y
= force_reg (mode
, y
);
3432 if (can_compare_p (*pcomparison
, mode
, purpose
))
3435 /* Handle a lib call just for the mode we are using. */
3437 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3439 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3442 /* If we want unsigned, and this mode has a distinct unsigned
3443 comparison routine, use that. */
3444 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3445 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3447 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3448 word_mode
, 2, x
, mode
, y
, mode
);
3452 if (TARGET_LIB_INT_CMP_BIASED
)
3453 /* Integer comparison returns a result that must be compared
3454 against 1, so that even if we do an unsigned compare
3455 afterward, there is still a value that can represent the
3456 result "less than". */
3466 if (class == MODE_FLOAT
)
3467 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3473 /* Before emitting an insn with code ICODE, make sure that X, which is going
3474 to be used for operand OPNUM of the insn, is converted from mode MODE to
3475 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3476 that it is accepted by the operand predicate. Return the new value. */
3479 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3480 enum machine_mode wider_mode
, int unsignedp
)
3482 if (mode
!= wider_mode
)
3483 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3485 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3486 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3490 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3496 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3497 we can do the comparison.
3498 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3499 be NULL_RTX which indicates that only a comparison is to be generated. */
3502 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3503 enum rtx_code comparison
, int unsignedp
, rtx label
)
3505 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3506 enum mode_class
class = GET_MODE_CLASS (mode
);
3507 enum machine_mode wider_mode
= mode
;
3509 /* Try combined insns first. */
3512 enum insn_code icode
;
3513 PUT_MODE (test
, wider_mode
);
3517 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3519 if (icode
!= CODE_FOR_nothing
3520 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3522 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3523 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3524 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3529 /* Handle some compares against zero. */
3530 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3531 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3533 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3534 emit_insn (GEN_FCN (icode
) (x
));
3536 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3540 /* Handle compares for which there is a directly suitable insn. */
3542 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3543 if (icode
!= CODE_FOR_nothing
)
3545 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3546 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3547 emit_insn (GEN_FCN (icode
) (x
, y
));
3549 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3553 if (class != MODE_INT
&& class != MODE_FLOAT
3554 && class != MODE_COMPLEX_FLOAT
)
3557 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3559 while (wider_mode
!= VOIDmode
);
3564 /* Generate code to compare X with Y so that the condition codes are
3565 set and to jump to LABEL if the condition is true. If X is a
3566 constant and Y is not a constant, then the comparison is swapped to
3567 ensure that the comparison RTL has the canonical form.
3569 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3570 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3571 the proper branch condition code.
3573 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3575 MODE is the mode of the inputs (in case they are const_int).
3577 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3578 be passed unchanged to emit_cmp_insn, then potentially converted into an
3579 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3582 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3583 enum machine_mode mode
, int unsignedp
, rtx label
)
3585 rtx op0
= x
, op1
= y
;
3587 /* Swap operands and condition to ensure canonical RTL. */
3588 if (swap_commutative_operands_p (x
, y
))
3590 /* If we're not emitting a branch, this means some caller
3596 comparison
= swap_condition (comparison
);
3600 /* If OP0 is still a constant, then both X and Y must be constants. Force
3601 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3603 if (CONSTANT_P (op0
))
3604 op0
= force_reg (mode
, op0
);
3608 comparison
= unsigned_condition (comparison
);
3610 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3612 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3615 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3618 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3619 enum machine_mode mode
, int unsignedp
)
3621 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3624 /* Emit a library call comparison between floating point X and Y.
3625 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3628 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3629 enum machine_mode
*pmode
, int *punsignedp
)
3631 enum rtx_code comparison
= *pcomparison
;
3632 enum rtx_code swapped
= swap_condition (comparison
);
3633 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3636 enum machine_mode orig_mode
= GET_MODE (x
);
3637 enum machine_mode mode
;
3638 rtx value
, target
, insns
, equiv
;
3640 bool reversed_p
= false;
3642 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3644 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3647 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3650 tmp
= x
; x
= y
; y
= tmp
;
3651 comparison
= swapped
;
3655 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3656 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3658 comparison
= reversed
;
3664 if (mode
== VOIDmode
)
3667 if (mode
!= orig_mode
)
3669 x
= convert_to_mode (mode
, x
, 0);
3670 y
= convert_to_mode (mode
, y
, 0);
3673 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3674 the RTL. The allows the RTL optimizers to delete the libcall if the
3675 condition can be determined at compile-time. */
3676 if (comparison
== UNORDERED
)
3678 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3679 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3680 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3681 temp
, const_true_rtx
, equiv
);
3685 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3686 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3688 rtx true_rtx
, false_rtx
;
3693 true_rtx
= const0_rtx
;
3694 false_rtx
= const_true_rtx
;
3698 true_rtx
= const_true_rtx
;
3699 false_rtx
= const0_rtx
;
3703 true_rtx
= const1_rtx
;
3704 false_rtx
= const0_rtx
;
3708 true_rtx
= const0_rtx
;
3709 false_rtx
= constm1_rtx
;
3713 true_rtx
= constm1_rtx
;
3714 false_rtx
= const0_rtx
;
3718 true_rtx
= const0_rtx
;
3719 false_rtx
= const1_rtx
;
3725 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3726 equiv
, true_rtx
, false_rtx
);
3731 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3732 word_mode
, 2, x
, mode
, y
, mode
);
3733 insns
= get_insns ();
3736 target
= gen_reg_rtx (word_mode
);
3737 emit_libcall_block (insns
, target
, value
, equiv
);
3739 if (comparison
== UNORDERED
3740 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3741 comparison
= reversed_p
? EQ
: NE
;
3746 *pcomparison
= comparison
;
3750 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3753 emit_indirect_jump (rtx loc
)
3755 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3757 loc
= copy_to_mode_reg (Pmode
, loc
);
3759 emit_jump_insn (gen_indirect_jump (loc
));
3763 #ifdef HAVE_conditional_move
3765 /* Emit a conditional move instruction if the machine supports one for that
3766 condition and machine mode.
3768 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3769 the mode to use should they be constants. If it is VOIDmode, they cannot
3772 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3773 should be stored there. MODE is the mode to use should they be constants.
3774 If it is VOIDmode, they cannot both be constants.
3776 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3777 is not supported. */
3780 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3781 enum machine_mode cmode
, rtx op2
, rtx op3
,
3782 enum machine_mode mode
, int unsignedp
)
3784 rtx tem
, subtarget
, comparison
, insn
;
3785 enum insn_code icode
;
3786 enum rtx_code reversed
;
3788 /* If one operand is constant, make it the second one. Only do this
3789 if the other operand is not constant as well. */
3791 if (swap_commutative_operands_p (op0
, op1
))
3796 code
= swap_condition (code
);
3799 /* get_condition will prefer to generate LT and GT even if the old
3800 comparison was against zero, so undo that canonicalization here since
3801 comparisons against zero are cheaper. */
3802 if (code
== LT
&& op1
== const1_rtx
)
3803 code
= LE
, op1
= const0_rtx
;
3804 else if (code
== GT
&& op1
== constm1_rtx
)
3805 code
= GE
, op1
= const0_rtx
;
3807 if (cmode
== VOIDmode
)
3808 cmode
= GET_MODE (op0
);
3810 if (swap_commutative_operands_p (op2
, op3
)
3811 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3820 if (mode
== VOIDmode
)
3821 mode
= GET_MODE (op2
);
3823 icode
= movcc_gen_code
[mode
];
3825 if (icode
== CODE_FOR_nothing
)
3830 op2
= force_not_mem (op2
);
3831 op3
= force_not_mem (op3
);
3835 target
= gen_reg_rtx (mode
);
3839 /* If the insn doesn't accept these operands, put them in pseudos. */
3841 if (! (*insn_data
[icode
].operand
[0].predicate
)
3842 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3843 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3845 if (! (*insn_data
[icode
].operand
[2].predicate
)
3846 (op2
, insn_data
[icode
].operand
[2].mode
))
3847 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3849 if (! (*insn_data
[icode
].operand
[3].predicate
)
3850 (op3
, insn_data
[icode
].operand
[3].mode
))
3851 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3853 /* Everything should now be in the suitable form, so emit the compare insn
3854 and then the conditional move. */
3857 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3859 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3860 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3861 return NULL and let the caller figure out how best to deal with this
3863 if (GET_CODE (comparison
) != code
)
3866 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3868 /* If that failed, then give up. */
3874 if (subtarget
!= target
)
3875 convert_move (target
, subtarget
, 0);
3880 /* Return nonzero if a conditional move of mode MODE is supported.
3882 This function is for combine so it can tell whether an insn that looks
3883 like a conditional move is actually supported by the hardware. If we
3884 guess wrong we lose a bit on optimization, but that's it. */
3885 /* ??? sparc64 supports conditionally moving integers values based on fp
3886 comparisons, and vice versa. How do we handle them? */
3889 can_conditionally_move_p (enum machine_mode mode
)
3891 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3897 #endif /* HAVE_conditional_move */
3899 /* Emit a conditional addition instruction if the machine supports one for that
3900 condition and machine mode.
3902 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3903 the mode to use should they be constants. If it is VOIDmode, they cannot
3906 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3907 should be stored there. MODE is the mode to use should they be constants.
3908 If it is VOIDmode, they cannot both be constants.
3910 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3911 is not supported. */
3914 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3915 enum machine_mode cmode
, rtx op2
, rtx op3
,
3916 enum machine_mode mode
, int unsignedp
)
3918 rtx tem
, subtarget
, comparison
, insn
;
3919 enum insn_code icode
;
3920 enum rtx_code reversed
;
3922 /* If one operand is constant, make it the second one. Only do this
3923 if the other operand is not constant as well. */
3925 if (swap_commutative_operands_p (op0
, op1
))
3930 code
= swap_condition (code
);
3933 /* get_condition will prefer to generate LT and GT even if the old
3934 comparison was against zero, so undo that canonicalization here since
3935 comparisons against zero are cheaper. */
3936 if (code
== LT
&& op1
== const1_rtx
)
3937 code
= LE
, op1
= const0_rtx
;
3938 else if (code
== GT
&& op1
== constm1_rtx
)
3939 code
= GE
, op1
= const0_rtx
;
3941 if (cmode
== VOIDmode
)
3942 cmode
= GET_MODE (op0
);
3944 if (swap_commutative_operands_p (op2
, op3
)
3945 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3954 if (mode
== VOIDmode
)
3955 mode
= GET_MODE (op2
);
3957 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3959 if (icode
== CODE_FOR_nothing
)
3964 op2
= force_not_mem (op2
);
3965 op3
= force_not_mem (op3
);
3969 target
= gen_reg_rtx (mode
);
3971 /* If the insn doesn't accept these operands, put them in pseudos. */
3973 if (! (*insn_data
[icode
].operand
[0].predicate
)
3974 (target
, insn_data
[icode
].operand
[0].mode
))
3975 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3979 if (! (*insn_data
[icode
].operand
[2].predicate
)
3980 (op2
, insn_data
[icode
].operand
[2].mode
))
3981 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3983 if (! (*insn_data
[icode
].operand
[3].predicate
)
3984 (op3
, insn_data
[icode
].operand
[3].mode
))
3985 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3987 /* Everything should now be in the suitable form, so emit the compare insn
3988 and then the conditional move. */
3991 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3993 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3994 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3995 return NULL and let the caller figure out how best to deal with this
3997 if (GET_CODE (comparison
) != code
)
4000 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4002 /* If that failed, then give up. */
4008 if (subtarget
!= target
)
4009 convert_move (target
, subtarget
, 0);
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    /* ... */;

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
	    (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (c, insn_data[icode].operand[2].mode)))
    /* ... */;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    /* ... */;

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    /* ... */;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    /* ... */;
  /* ... */
}
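/* Note (illustrative, not from the original source): the predicate checks
   above mirror the operand constraints of the target's addM3 pattern.  A
   typical caller pairs the two helpers, along the lines of

	if (have_add2_insn (x, y))
	  emit_insn (gen_add2_insn (x, y));

   and falls back to expand_binop or a library call otherwise.  */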
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    /* ... */;

  return (GEN_FCN (icode) (x, x, y));
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || ! ((*insn_data[icode].operand[0].predicate)
	    (r0, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (r1, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (c, insn_data[icode].operand[2].mode)))
    /* ... */;

  return (GEN_FCN (icode) (r0, r1, c));
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  if (GET_MODE (x) == VOIDmode)
    /* ... */;

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    /* ... */;

  if (! ((*insn_data[icode].operand[0].predicate)
	 (x, insn_data[icode].operand[0].mode))
      || ! ((*insn_data[icode].operand[1].predicate)
	    (x, insn_data[icode].operand[1].mode))
      || ! ((*insn_data[icode].operand[2].predicate)
	    (y, insn_data[icode].operand[2].mode)))
    /* ... */;
  /* ... */
}

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  /* ... */
  emit_move_insn_1 (x, y);
  /* ... */
}
/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
	      int unsignedp)
{
  convert_optab tab;

#ifdef HAVE_ptr_extend
  /* ... */
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
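/* Note (illustrative, not from the original source): for example,
   can_extend_p (DImode, SImode, 0) returns the insn code recorded for the
   target's extendsidi2 pattern, and the unsignedp case looks up
   zero_extendsidi2; CODE_FOR_nothing means the widening has to be
   synthesized some other way.  */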
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}

/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
	   int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    /* ... */;

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    /* ... */;

  return CODE_FOR_nothing;
}
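/* Note (illustrative, not from the original source): the first lookup above
   corresponds to the fix_trunc* / fixuns_trunc* named patterns, which
   truncate toward zero themselves, so *TRUNCP_PTR is left 0 on that path;
   the fallback uses a plain fix* pattern and therefore also requires an
   ftrunc* pattern, with *TRUNCP_PTR set to 1 so the caller emits the
   explicit truncation first.  */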
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}

/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  if (GET_MODE (from) == VOIDmode)
    /* ... */;

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
	  /* ... */;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
	  icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    /* ... */
	      convert_move (to, target, 0);
	    /* ... */
	  }
      }

  /* Unsigned integer, and no way to convert directly.
     Convert as signed, then conditionally adjust the result.  */
  if (unsignedp)
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* ... */
	from = force_not_mem (from);

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	  /* ... */;

      if (fmode == VOIDmode)
	/* There is no such mode.  Pretend the target is wide enough.  */
	fmode = GET_MODE (to);

      /* Avoid double-rounding when TO is narrower than FROM.  */
      if ((significand_size (fmode) + 1)
	  < GET_MODE_BITSIZE (GET_MODE (from)))
	{
	  rtx temp1;
	  rtx neglabel = gen_label_rtx ();

	  /* Don't use TARGET if it isn't a register, is a hard register,
	     or is the wrong mode.  */
	  if (/* ... */
	      || REGNO (target) < FIRST_PSEUDO_REGISTER
	      || GET_MODE (target) != fmode)
	    target = gen_reg_rtx (fmode);

	  imode = GET_MODE (from);
	  do_pending_stack_adjust ();

	  /* Test whether the sign bit is set.  */
	  emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				   /* ... */);

	  /* The sign bit is not set.  Convert as signed.  */
	  expand_float (target, from, 0);
	  emit_jump_insn (gen_jump (label));

	  /* The sign bit is set.
	     Convert to a usable (positive signed) value by shifting right
	     one bit, while remembering if a nonzero bit was shifted
	     out; i.e., compute  (from & 1) | (from >> 1).  */

	  emit_label (neglabel);
	  temp = expand_binop (imode, and_optab, from, const1_rtx,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	  temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
				/* ... */);
	  temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
			       /* ... */);
	  expand_float (target, temp, 0);

	  /* Multiply by 2 to undo the shift above.  */
	  temp = expand_binop (fmode, add_optab, target, target,
			       target, 0, OPTAB_LIB_WIDEN);
	  /* ... */
	    emit_move_insn (target, temp);

	  do_pending_stack_adjust ();
	  /* ... */
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       /* ... */);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
			   CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      /* ... */
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      /* ... */
    }

  /* No hardware instruction available; call a library routine.  */
  else
    {
      rtx libfunc;
      rtx insns, value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      /* ... */
	from = force_not_mem (from);

      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;

      /* ... */

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       /* ... */);
      insns = get_insns ();
      /* ... */

      emit_libcall_block (insns, target, value,
			  gen_rtx_FLOAT (GET_MODE (to), from));
    }

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
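/* Note (illustrative, not from the original source): the unsigned fallback
   above is the usual "convert as signed, then adjust" idiom.  For a 32-bit
   FROM whose bit pattern is 0xFFFFFFFF, the signed conversion produces -1.0,
   and adding 2**32 = 4294967296.0 on the negative branch yields the intended
   4294967295.0.  */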
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    /* ... */);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FIX : FIX);
	    if (target != to)
	      convert_move (to, target, unsignedp);
	    /* ... */
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode to DImode.

     The hot path through the code deals with inputs smaller than 2^63
     and does just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range
     2^63..2^64-1 inclusive (for other inputs, overflow happens and the
     result is undefined).  So we know that the most significant bit set
     in the mantissa corresponds to 2^63.  The subtraction of 2^63 should
     not generate any rounding as it simply clears out that bit.  The rest
     is trivial.  */
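/* Note (illustrative, not from the original source): concretely, for an
   unsigned 64-bit result the limit computed below is
   2**63 = 9223372036854775808.0; inputs below it take the plain signed fix,
   while inputs in [2**63, 2**64-1] are first reduced by 2**63, fixed as
   signed values, and then have bit 63 restored with the XOR.  */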
  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
					 /* ... */))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit, lab1, lab2, insn;

	  bitsize = GET_MODE_BITSIZE (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  /* ... */
	    from = force_not_mem (from);

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   /* ... */);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */

	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 /* ... */
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  /* ... */),
				 to, 1, OPTAB_LIB_WIDEN);

	  /* ... */
	    emit_move_insn (to, target);

	  if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_unique_reg_note (insn,
				   /* ... */
				   gen_rtx_fmt_e (UNSIGNED_FIX,
						  /* ... */));
	    }
	  /* ... */
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns, value, libfunc;
      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;

      /* ... */
	from = force_not_mem (from);

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       /* ... */);
      insns = get_insns ();
      /* ... */

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}

/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
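/* Note (illustrative, not from the original source): for example,
   have_insn_for (PLUS, SImode) is nonzero exactly when the target provides
   an addsi3 pattern, since init_optabs records add_optab in
   code_to_optab[PLUS] and the generated optab-initialization code fills in
   the SImode handler from that pattern.  */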
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }
  /* ... */
}

static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
	op->handlers[i][j].insn_code = CODE_FOR_nothing;
	op->handlers[i][j].libfunc = 0;
      }
  /* ... */
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  /* ... */
  code_to_optab[(int) code] = op;
  /* ... */
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  /* ... */
}

/* Conversion optabs never go in the code_to_optab table.  */

static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  /* ... */
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
   be initialized.
   LAST_MODE is the last machine mode index in the given optab to
   be initialized.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
   the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
	       const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
	*p++ = *q++;
      for (q = mname; *q; q++)
	*p++ = TOLOWER (*q);
      /* ... */

      optable->handlers[(int) mode].libfunc
	= init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
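/* Note (illustrative, not from the original source): following the rule
   described above, init_libfuncs (add_optab, ..., "add", '3') registers
   names such as "__addsi3" and "__adddf3": a leading "__", the operation
   name, the lowercased mode name, and the operand count, matching the
   corresponding libgcc entry points.  */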
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
		 mode_for_size (maxsize, MODE_INT, 0),
		 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class from_class,
			       enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode; tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
	 tmode = GET_MODE_WIDER_MODE (tmode))
      {
	fname = GET_MODE_NAME (fmode);
	tname = GET_MODE_NAME (tmode);

	p = suffix;
	for (q = fname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = tname; *q; p++, q++)
	  *p = TOLOWER (*q);
	/* ... */

	tab->handlers[tmode][fmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						/* ... */));
      }
}
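/* Note (illustrative, not from the original source): with these rules,
   init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT)
   yields names like "__floatsidf", while the "fix" and "fixuns" calls in
   init_optabs yield "__fixdfsi" and "__fixunsdfsi"; the from-mode name comes
   before the to-mode name, and no operand count is appended.  */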
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */

static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
	 wmode = GET_MODE_WIDER_MODE (wmode))
      {
	nname = GET_MODE_NAME (nmode);
	wname = GET_MODE_NAME (wmode);

	p = suffix;
	for (q = widening ? nname : wname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = widening ? wname : nname; *q; p++, q++)
	  *p = TOLOWER (*q);
	/* ... */

	tab->handlers[widening ? wmode : nmode]
		     [widening ? nmode : wmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						/* ... */));
      }
}
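/* Note (illustrative, not from the original source): for MODE_FLOAT this
   produces the familiar soft-float names: the widening sext_optab call in
   init_optabs registers "__extendsfdf2", and the non-widening trunc_optab
   call registers "__truncdfsf2".  For widening conversions the narrow mode
   name comes first; for truncations it comes second.  */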
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;
  /* ... */
}

/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.  */

void
init_optabs (void)
{
  int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }

  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  drem_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  ldexp_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  copysign_optab = init_optab (UNKNOWN);

  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  vec_extract_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);

  powi_optab = init_optab (UNKNOWN);

  /* Conversions.  */
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      clrmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;

#ifdef HAVE_SECONDARY_RELOADS
      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
#endif
    }

  /* Fill in the optabs with the insns we support.  */
  /* ... */

  /* Initialize the optabs with the names of the library functions.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs,
     signed and unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  init_floating_libfuncs (powi_optab, "powi", '2');

  /* Conversions.  */
  init_interclass_conv_libfuncs (sfloat_optab, "float",
				 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
				 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
				 MODE_FLOAT, MODE_INT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

  unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
					    ? "_Unwind_SjLj_Resume"
					    : "_Unwind_Resume");
#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
/* Print information about the current contents of the optabs on
   stderr.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	struct optab_handlers *h;

	/* ... */
	h = &o->handlers[j];
	/* ... */
	if (GET_CODE (h->libfunc) != SYMBOL_REF)
	  /* ... */;
	fprintf (stderr, "%s\t%s:\t%s\n",
		 GET_RTX_NAME (o->code),
		 /* ... */
		 XSTR (h->libfunc, 0));
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) CTI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  struct optab_handlers *h;

	  o = &convert_optab_table[i];
	  h = &o->handlers[j][k];
	  /* ... */
	  if (GET_CODE (h->libfunc) != SYMBOL_REF)
	    /* ... */;
	  fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		   GET_RTX_NAME (o->code),
		   /* ... */
		   XSTR (h->libfunc, 0));
	}
}

/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
	       rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    /* ... */;

  if (mode == VOIDmode)
    /* ... */;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    /* ... */;

  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  /* ... */

  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  insn = gen_conditional_trap (trap_rtx, tcode);
  /* ... */
  insn = get_insns ();
  /* ... */
}

/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

static enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;

  switch (tcode)
    {
    /* ... */
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      /* ... */
    }
  /* ... */
  return code;
}

/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  if (!COMPARISON_CLASS_P (cond))
    {
      /* This is unlikely.  While generating VEC_COND_EXPR,
	 the auto vectorizer ensures that the condition is a relational
	 operation.  */
      /* ... */
    }

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);

  if (!(*insn_data[icode].operand[4].predicate) (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!(*insn_data[icode].operand[5].predicate) (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}

/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in mode VMODE.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}

/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    /* ... */;

  if (!target)
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
				   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them into registers, if required.  */
  rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
			 NULL_RTX, VOIDmode, 1);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
			 NULL_RTX, VOIDmode, 1);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit the instruction.  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
			      comparison, cc_op0, cc_op1));

  return target;
}
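/* Note (illustrative, not from the original source): the vcond and vcondu
   patterns reached through vcond_gen_code / vcondu_gen_code take six
   operands in the order used by the emit_insn call above: the destination,
   the two value vectors, the comparison rtx, and the two values being
   compared.  That is why vector_compare_rtx validates operand predicates 4
   and 5.  */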
#include "gt-optabs.h"