/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "basic-block.h"

/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;

static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
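/* Illustrative note (not part of the original source): for a binary CODE
   such as PLUS, the note attached below is simply the rtx
   (plus:MODE op0 op1), so later passes can treat TARGET as equal to that
   expression.  */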
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;

      return one_cmpl_optab;

      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);

      return trapv ? addv_optab : add_optab;

      return trapv ? subv_optab : sub_optab;

      return trapv ? smulv_optab : smul_optab;

      return trapv ? negv_optab : neg_optab;

      return trapv ? absv_optab : abs_optab;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}

/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  emit_insn (pat);
  return target;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
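/* Illustrative example (not in the original source): for a 64-bit logical
   right shift by 40 on a 32-bit-word target, the effective count is
   >= BITS_PER_WORD, so INTO_TARGET becomes OUTOF_INPUT >> (40 - 32) and
   OUTOF_TARGET is simply filled with zeros (or with sign copies for an
   arithmetic right shift).  */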
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
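      /* Worked example (illustrative, not in the original source): with
         BITS_PER_WORD == 32, shift_mask == 31 and OP1 == 5, the code below
         first shifts by 1 and then by (~5 & 31) == 26, a total reverse
         shift of 27 == 32 - 5, without ever using a shift count of 32.  */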
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}

#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
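/* Illustrative decomposition (not in the original source): a 64-bit left
   shift by N < 32 on a 32-bit-word target becomes

     INTO_TARGET  = (INTO_INPUT << N) | (OUTOF_INPUT >> (32 - N))
     OUTOF_TARGET = OUTOF_INPUT << N

   where OUTOF_* is the low word and INTO_* is the high word.  */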
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                          _______________________
                         [__op0_high_|__op0_low__]
                          _______________________
       *                 [__op1_high_|__op1_low__]
       _______________________________________________
                          _______________________
   (1)                   [__op0_low__*__op1_low__]
                      _______________________
   (2a)              [__op0_low__*__op1_high_]
                      _______________________
   (2b)              [__op0_high_*__op1_low__]
           _______________________
   (3)    [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
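/* Worked example (illustrative, not in the original source): with 4-bit
   words, op0_low = 10 (-6 when treated as signed) and op1_low = 3, a
   signed widening multiply yields -18 = 30 - 3*16, so adding
   op1_low * 2**BITS_PER_WORD = 48 recovers the unsigned product 30.  */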
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;
  bool first_pass_p = true;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (((target == 0 || REG_P (target))
           ? ((REG_P (op1)
               && !REG_P (op0))
              || target == op1)
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
        {
          temp = op1;
          op1 = op0;
          op0 = temp;
        }
    }

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */

      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* If we were trying to rotate by a constant value, and that didn't
     work, try rotating the other direction before falling back to
     shifts and bitwise-or.  */
  if (first_pass_p
      && (binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
    {
      first_pass_p = false;
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = target;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
          else
            equiv_value = 0;

          /* We can't make this a no conflict block if this is a word swap,
             because the word swap case fails if the input and output values
             are in the same register.  */
          if (shift_count != BITS_PER_WORD)
            emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          else
            emit_insn (insns);

          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
        {
          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT
                      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                                 GET_MODE_BITSIZE (wider_mode)))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
1807 /* Expand a binary operator which has both signed and unsigned forms.
1808 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1811 If we widen unsigned operands, we may use a signed wider operation instead
1812 of an unsigned wider operation, since the result would be the same. */
1815 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1816 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1817 enum optab_methods methods
)
1820 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1821 struct optab wide_soptab
;
1823 /* Do it without widening, if possible. */
1824 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1825 unsignedp
, OPTAB_DIRECT
);
1826 if (temp
|| methods
== OPTAB_DIRECT
)
1829 /* Try widening to a signed int. Make a fake signed optab that
1830 hides any signed insn for direct use. */
1831 wide_soptab
= *soptab
;
1832 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1833 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1835 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1836 unsignedp
, OPTAB_WIDEN
);
1838 /* For unsigned operands, try widening to an unsigned int. */
1839 if (temp
== 0 && unsignedp
)
1840 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1841 unsignedp
, OPTAB_WIDEN
);
1842 if (temp
|| methods
== OPTAB_WIDEN
)
1845 /* Use the right width lib call if that exists. */
1846 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1847 if (temp
|| methods
== OPTAB_LIB
)
1850 /* Must widen and use a lib call, use either signed or unsigned. */
1851 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1852 unsignedp
, methods
);
1856 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1857 unsignedp
, methods
);
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (unoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
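
/* Illustrative sketch (added, not from the original source): a hypothetical
   caller negating an SImode value by rtx code rather than by optab.  */
#if 0
static rtx
example_negate_si (rtx op0)
{
  /* code_to_optab[NEG] is neg_optab, so this is equivalent to calling
     expand_unop (SImode, neg_optab, op0, NULL_RTX, 0).  */
  return expand_simple_unop (SImode, NEG, op0, NULL_RTX, 0);
}
#endif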
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */

static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
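
/* Worked example of the identity above, written in plain C and added for
   illustration only: counting leading zeros of an 8-bit value with a 32-bit
   clz.  __builtin_clz appears here purely to demonstrate the arithmetic.  */
#if 0
static int
example_clz8_via_clz32 (unsigned char x)
{
  /* The zero-extended value has 32 - 8 = 24 extra leading zero bits, so
     subtract (width wide) - (width narrow).  Assumes x != 0.  */
  return __builtin_clz ((unsigned int) x) - (32 - 8);
}
#endif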
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
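
/* Worked example of the identity above, added for illustration only:
   parity is the low bit of the population count.  */
#if 0
static unsigned int
example_parity (unsigned int x)
{
  /* (parity x) == (and (popcount x) 1).  */
  return __builtin_popcount (x) & 1;
}
#endif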
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
			   enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (nwords > 1)
    {
      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_const (lo, hi, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_const (lo, hi, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
			   gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
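
/* Illustration (added commentary, not from the original source) of the
   sign-bit masks built above, for the common case of IEEE single precision
   where the sign occupies bit 31 of the 32-bit image.  */
#if 0
#include <stdint.h>

static uint32_t
example_float_abs_bits (uint32_t bits)
{
  return bits & 0x7fffffffu;	/* ABS: AND with ~(1 << signbit).  */
}

static uint32_t
example_float_neg_bits (uint32_t bits)
{
  return bits ^ 0x80000000u;	/* NEG: XOR with (1 << signbit).  */
}
#endif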
2339 /* Generate code to perform an operation specified by UNOPTAB
2340 on operand OP0, with result having machine-mode MODE.
2342 UNSIGNEDP is for the case where we have to widen the operands
2343 to perform the operation. It says to use zero-extension.
2345 If TARGET is nonzero, the value
2346 is generated there, if it is convenient to do so.
2347 In all cases an rtx is returned for the locus of the value;
2348 this may or may not be TARGET. */
2351 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2354 enum mode_class
class;
2355 enum machine_mode wider_mode
;
2357 rtx last
= get_last_insn ();
2360 class = GET_MODE_CLASS (mode
);
2362 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2364 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2365 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2371 temp
= gen_reg_rtx (mode
);
2373 if (GET_MODE (xop0
) != VOIDmode
2374 && GET_MODE (xop0
) != mode0
)
2375 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2377 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2379 if (!insn_data
[icode
].operand
[1].predicate (xop0
, mode0
))
2380 xop0
= copy_to_mode_reg (mode0
, xop0
);
2382 if (!insn_data
[icode
].operand
[0].predicate (temp
, mode
))
2383 temp
= gen_reg_rtx (mode
);
2385 pat
= GEN_FCN (icode
) (temp
, xop0
);
2388 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2389 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2391 delete_insns_since (last
);
2392 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2400 delete_insns_since (last
);
2403 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2405 /* Widening clz needs special treatment. */
2406 if (unoptab
== clz_optab
)
2408 temp
= widen_clz (mode
, op0
, target
);
2415 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2416 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2417 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2419 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2423 /* For certain operations, we need not actually extend
2424 the narrow operand, as long as we will truncate the
2425 results to the same narrowness. */
2427 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2428 (unoptab
== neg_optab
2429 || unoptab
== one_cmpl_optab
)
2430 && class == MODE_INT
);
2432 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2437 if (class != MODE_INT
2438 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
2439 GET_MODE_BITSIZE (wider_mode
)))
2442 target
= gen_reg_rtx (mode
);
2443 convert_move (target
, temp
, 0);
2447 return gen_lowpart (mode
, temp
);
2450 delete_insns_since (last
);
2454 /* These can be done a word at a time. */
2455 if (unoptab
== one_cmpl_optab
2456 && class == MODE_INT
2457 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2458 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2463 if (target
== 0 || target
== op0
)
2464 target
= gen_reg_rtx (mode
);
2468 /* Do the actual arithmetic. */
2469 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2471 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2472 rtx x
= expand_unop (word_mode
, unoptab
,
2473 operand_subword_force (op0
, i
, mode
),
2474 target_piece
, unsignedp
);
2476 if (target_piece
!= x
)
2477 emit_move_insn (target_piece
, x
);
2480 insns
= get_insns ();
2483 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2484 gen_rtx_fmt_e (unoptab
->code
, mode
,
2489 if (unoptab
->code
== NEG
)
2491 /* Try negating floating point values by flipping the sign bit. */
2492 if (class == MODE_FLOAT
)
2494 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2499 /* If there is no negation pattern, and we have no negative zero,
2500 try subtracting from zero. */
2501 if (!HONOR_SIGNED_ZEROS (mode
))
2503 temp
= expand_binop (mode
, (unoptab
== negv_optab
2504 ? subv_optab
: sub_optab
),
2505 CONST0_RTX (mode
), op0
, target
,
2506 unsignedp
, OPTAB_DIRECT
);
2512 /* Try calculating parity (x) as popcount (x) % 2. */
2513 if (unoptab
== parity_optab
)
2515 temp
= expand_parity (mode
, op0
, target
);
2521 /* Now try a library call in this mode. */
2522 if (unoptab
->handlers
[(int) mode
].libfunc
)
2526 enum machine_mode outmode
= mode
;
2528 /* All of these functions return small values. Thus we choose to
2529 have them return something that isn't a double-word. */
2530 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2531 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2533 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2537 /* Pass 1 for NO_QUEUE so we don't lose any increments
2538 if the libcall is cse'd or moved. */
2539 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2540 NULL_RTX
, LCT_CONST
, outmode
,
2542 insns
= get_insns ();
2545 target
= gen_reg_rtx (outmode
);
2546 emit_libcall_block (insns
, target
, value
,
2547 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2552 /* It can't be done in this mode. Can we do it in a wider mode? */
2554 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2556 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2557 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2559 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2560 != CODE_FOR_nothing
)
2561 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2565 /* For certain operations, we need not actually extend
2566 the narrow operand, as long as we will truncate the
2567 results to the same narrowness. */
2569 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2570 (unoptab
== neg_optab
2571 || unoptab
== one_cmpl_optab
)
2572 && class == MODE_INT
);
2574 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2577 /* If we are generating clz using wider mode, adjust the
2579 if (unoptab
== clz_optab
&& temp
!= 0)
2580 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2581 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2582 - GET_MODE_BITSIZE (mode
)),
2583 target
, true, OPTAB_DIRECT
);
2587 if (class != MODE_INT
)
2590 target
= gen_reg_rtx (mode
);
2591 convert_move (target
, temp
, 0);
2595 return gen_lowpart (mode
, temp
);
2598 delete_insns_since (last
);
2603 /* One final attempt at implementing negation via subtraction,
2604 this time allowing widening of the operand. */
2605 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2608 temp
= expand_binop (mode
,
2609 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2610 CONST0_RTX (mode
), op0
,
2611 target
, unsignedp
, OPTAB_LIB_WIDEN
);
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
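
/* Worked example (added for illustration only) of the branch-free integer
   absolute value used above: an arithmetic right shift by W-1 produces 0
   for non-negative values and -1 for negative ones, so the xor/subtract
   pair conditionally negates without a jump.  Like the non-trapping
   expansion, it wraps on the most negative value.  */
#if 0
static int
example_abs_branchless (int x)
{
  int m = x >> (sizeof (int) * 8 - 1);	/* 0 or -1.  */
  return (x ^ m) - m;
}
#endif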
2692 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2693 int result_unsignedp
, int safe
)
2698 result_unsignedp
= 1;
2700 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2704 /* If that does not win, use conditional jump and negate. */
2706 /* It is safe to use the target if it is the same
2707 as the source if this is also a pseudo register */
2708 if (op0
== target
&& REG_P (op0
)
2709 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2712 op1
= gen_label_rtx ();
2713 if (target
== 0 || ! safe
2714 || GET_MODE (target
) != mode
2715 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2717 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2718 target
= gen_reg_rtx (mode
);
2720 emit_move_insn (target
, op0
);
2723 /* If this mode is an integer too wide to compare properly,
2724 compare word by word. Rely on CSE to optimize constant cases. */
2725 if (GET_MODE_CLASS (mode
) == MODE_INT
2726 && ! can_compare_p (GE
, mode
, ccp_jump
))
2727 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2730 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2731 NULL_RTX
, NULL_RTX
, op1
);
2733 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2736 emit_move_insn (target
, op0
);
2742 /* A subroutine of expand_copysign, perform the copysign operation using the
2743 abs and neg primitives advertised to exist on the target. The assumption
2744 is that we have a split register file, and leaving op0 in fp registers,
2745 and not playing with subregs so much, will help the register allocator. */
2748 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2749 int bitpos
, bool op0_is_abs
)
2751 enum machine_mode imode
;
2752 HOST_WIDE_INT hi
, lo
;
2761 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2768 if (target
== NULL_RTX
)
2769 target
= copy_to_reg (op0
);
2771 emit_move_insn (target
, op0
);
2774 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2776 imode
= int_mode_for_mode (mode
);
2777 if (imode
== BLKmode
)
2779 op1
= gen_lowpart (imode
, op1
);
2784 if (FLOAT_WORDS_BIG_ENDIAN
)
2785 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2787 word
= bitpos
/ BITS_PER_WORD
;
2788 bitpos
= bitpos
% BITS_PER_WORD
;
2789 op1
= operand_subword_force (op1
, word
, mode
);
2792 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2795 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2799 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2803 op1
= expand_binop (imode
, and_optab
, op1
,
2804 immed_double_const (lo
, hi
, imode
),
2805 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2807 label
= gen_label_rtx ();
2808 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2810 if (GET_CODE (op0
) == CONST_DOUBLE
)
2811 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2813 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2815 emit_move_insn (target
, op0
);
2823 /* A subroutine of expand_copysign, perform the entire copysign operation
2824 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2825 is true if op0 is known to have its sign bit clear. */
2828 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2829 int bitpos
, bool op0_is_abs
)
2831 enum machine_mode imode
;
2832 HOST_WIDE_INT hi
, lo
;
2833 int word
, nwords
, i
;
2836 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2838 imode
= int_mode_for_mode (mode
);
2839 if (imode
== BLKmode
)
2848 if (FLOAT_WORDS_BIG_ENDIAN
)
2849 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2851 word
= bitpos
/ BITS_PER_WORD
;
2852 bitpos
= bitpos
% BITS_PER_WORD
;
2853 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2856 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2859 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2863 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2867 if (target
== 0 || target
== op0
|| target
== op1
)
2868 target
= gen_reg_rtx (mode
);
2874 for (i
= 0; i
< nwords
; ++i
)
2876 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2877 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2882 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2883 immed_double_const (~lo
, ~hi
, imode
),
2884 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2886 op1
= expand_binop (imode
, and_optab
,
2887 operand_subword_force (op1
, i
, mode
),
2888 immed_double_const (lo
, hi
, imode
),
2889 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2891 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2892 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2893 if (temp
!= targ_piece
)
2894 emit_move_insn (targ_piece
, temp
);
2897 emit_move_insn (targ_piece
, op0_piece
);
2900 insns
= get_insns ();
2903 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2907 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2908 immed_double_const (lo
, hi
, imode
),
2909 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2911 op0
= gen_lowpart (imode
, op0
);
2913 op0
= expand_binop (imode
, and_optab
, op0
,
2914 immed_double_const (~lo
, ~hi
, imode
),
2915 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2917 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2918 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2919 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
	      && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
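
/* Illustrative sketch (added, not from the original source): a hypothetical
   caller expanding copysign for two DFmode pseudos, falling back when no
   inline expansion is known.  */
#if 0
static rtx
example_copysign_df (rtx mag, rtx sign)
{
  rtx r = expand_copysign (mag, sign, NULL_RTX);
  /* A NULL return means the caller must emit a library call instead.  */
  return r;
}
#endif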
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
3009 struct no_conflict_data
3011 rtx target
, first
, insn
;
3015 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3016 Set P->must_stay if the currently examined clobber / store has to stay
3017 in the list of insns that constitute the actual no_conflict block /
3020 no_conflict_move_test (rtx dest
, rtx set
, void *p0
)
3022 struct no_conflict_data
*p
= p0
;
3024 /* If this inns directly contributes to setting the target, it must stay. */
3025 if (reg_overlap_mentioned_p (p
->target
, dest
))
3026 p
->must_stay
= true;
3027 /* If we haven't committed to keeping any other insns in the list yet,
3028 there is nothing more to check. */
3029 else if (p
->insn
== p
->first
)
3031 /* If this insn sets / clobbers a register that feeds one of the insns
3032 already in the list, this insn has to stay too. */
3033 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3034 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3035 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3036 /* Likewise if this insn depends on a register set by a previous
3037 insn in the list, or if it sets a result (presumably a hard
3038 register) that is set or clobbered by a previous insn.
3039 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3040 SET_DEST perform the former check on the address, and the latter
3041 check on the MEM. */
3042 || (GET_CODE (set
) == SET
3043 && (modified_in_p (SET_SRC (set
), p
->first
)
3044 || modified_in_p (SET_DEST (set
), p
->first
)
3045 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3046 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3047 p
->must_stay
= true;
3050 /* Emit code to perform a series of operations on a multi-word quantity, one
3053 Such a block is preceded by a CLOBBER of the output, consists of multiple
3054 insns, each setting one word of the output, and followed by a SET copying
3055 the output to itself.
3057 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3058 note indicating that it doesn't conflict with the (also multi-word)
3059 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3062 INSNS is a block of code generated to perform the operation, not including
3063 the CLOBBER and final copy. All insns that compute intermediate values
3064 are first emitted, followed by the block as described above.
3066 TARGET, OP0, and OP1 are the output and inputs of the operations,
3067 respectively. OP1 may be zero for a unary operation.
3069 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3072 If TARGET is not a register, INSNS is simply emitted with no special
3073 processing. Likewise if anything in INSNS is not an INSN or if
3074 there is a libcall block inside INSNS.
3076 The final insn emitted is returned. */
3079 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
3081 rtx prev
, next
, first
, last
, insn
;
3083 if (!REG_P (target
) || reload_in_progress
)
3084 return emit_insn (insns
);
3086 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3087 if (!NONJUMP_INSN_P (insn
)
3088 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
3089 return emit_insn (insns
);
3091 /* First emit all insns that do not store into words of the output and remove
3092 these from the list. */
3093 for (insn
= insns
; insn
; insn
= next
)
3096 struct no_conflict_data data
;
3098 next
= NEXT_INSN (insn
);
3100 /* Some ports (cris) create a libcall regions at their own. We must
3101 avoid any potential nesting of LIBCALLs. */
3102 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3103 remove_note (insn
, note
);
3104 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3105 remove_note (insn
, note
);
3107 data
.target
= target
;
3111 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3112 if (! data
.must_stay
)
3114 if (PREV_INSN (insn
))
3115 NEXT_INSN (PREV_INSN (insn
)) = next
;
3120 PREV_INSN (next
) = PREV_INSN (insn
);
3126 prev
= get_last_insn ();
3128 /* Now write the CLOBBER of the output, followed by the setting of each
3129 of the words, followed by the final copy. */
3130 if (target
!= op0
&& target
!= op1
)
3131 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3133 for (insn
= insns
; insn
; insn
= next
)
3135 next
= NEXT_INSN (insn
);
3138 if (op1
&& REG_P (op1
))
3139 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3142 if (op0
&& REG_P (op0
))
3143 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3147 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3148 != CODE_FOR_nothing
)
3150 last
= emit_move_insn (target
, target
);
3152 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3156 last
= get_last_insn ();
3158 /* Remove any existing REG_EQUAL note from "last", or else it will
3159 be mistaken for a note referring to the full contents of the
3160 alleged libcall value when found together with the REG_RETVAL
3161 note added below. An existing note can come from an insn
3162 expansion at "last". */
3163 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3167 first
= get_insns ();
3169 first
= NEXT_INSN (prev
);
3171 /* Encapsulate the block so it gets manipulated as a unit. */
3172 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3174 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3179 /* Emit code to make a call to a constant function or a library call.
3181 INSNS is a list containing all insns emitted in the call.
3182 These insns leave the result in RESULT. Our block is to copy RESULT
3183 to TARGET, which is logically equivalent to EQUIV.
3185 We first emit any insns that set a pseudo on the assumption that these are
3186 loading constants into registers; doing so allows them to be safely cse'ed
3187 between blocks. Then we emit all the other insns in the block, followed by
3188 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3189 note with an operand of EQUIV.
3191 Moving assignments to pseudos outside of the block is done to improve
3192 the generated code, but is not required to generate correct code,
3193 hence being unable to move an assignment is not grounds for not making
3194 a libcall block. There are two reasons why it is safe to leave these
3195 insns inside the block: First, we know that these pseudos cannot be
3196 used in generated RTL outside the block since they are created for
3197 temporary purposes within the block. Second, CSE will not record the
3198 values of anything set inside a libcall block, so we know they must
3199 be dead at the end of the block.
3201 Except for the first group of insns (the ones setting pseudos), the
3202 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3205 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3207 rtx final_dest
= target
;
3208 rtx prev
, next
, first
, last
, insn
;
3210 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3211 into a MEM later. Protect the libcall block from this change. */
3212 if (! REG_P (target
) || REG_USERVAR_P (target
))
3213 target
= gen_reg_rtx (GET_MODE (target
));
3215 /* If we're using non-call exceptions, a libcall corresponding to an
3216 operation that may trap may also trap. */
3217 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3219 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3222 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3224 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3225 remove_note (insn
, note
);
3229 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3230 reg note to indicate that this call cannot throw or execute a nonlocal
3231 goto (unless there is already a REG_EH_REGION note, in which case
3233 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3236 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3239 XEXP (note
, 0) = constm1_rtx
;
3241 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3245 /* First emit all insns that set pseudos. Remove them from the list as
3246 we go. Avoid insns that set pseudos which were referenced in previous
3247 insns. These can be generated by move_by_pieces, for example,
3248 to update an address. Similarly, avoid insns that reference things
3249 set in previous insns. */
3251 for (insn
= insns
; insn
; insn
= next
)
3253 rtx set
= single_set (insn
);
3256 /* Some ports (cris) create a libcall regions at their own. We must
3257 avoid any potential nesting of LIBCALLs. */
3258 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3259 remove_note (insn
, note
);
3260 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3261 remove_note (insn
, note
);
3263 next
= NEXT_INSN (insn
);
3265 if (set
!= 0 && REG_P (SET_DEST (set
))
3266 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3268 struct no_conflict_data data
;
3270 data
.target
= const0_rtx
;
3274 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3275 if (! data
.must_stay
)
3277 if (PREV_INSN (insn
))
3278 NEXT_INSN (PREV_INSN (insn
)) = next
;
3283 PREV_INSN (next
) = PREV_INSN (insn
);
3289 /* Some ports use a loop to copy large arguments onto the stack.
3290 Don't move anything outside such a loop. */
3295 prev
= get_last_insn ();
3297 /* Write the remaining insns followed by the final copy. */
3299 for (insn
= insns
; insn
; insn
= next
)
3301 next
= NEXT_INSN (insn
);
3306 last
= emit_move_insn (target
, result
);
3307 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3308 != CODE_FOR_nothing
)
3309 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3312 /* Remove any existing REG_EQUAL note from "last", or else it will
3313 be mistaken for a note referring to the full contents of the
3314 libcall value when found together with the REG_RETVAL note added
3315 below. An existing note can come from an insn expansion at
3317 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3320 if (final_dest
!= target
)
3321 emit_move_insn (final_dest
, target
);
3324 first
= get_insns ();
3326 first
= NEXT_INSN (prev
);
3328 /* Encapsulate the block so it gets manipulated as a unit. */
3329 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3331 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3332 when the encapsulated region would not be in one basic block,
3333 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3335 bool attach_libcall_retval_notes
= true;
3336 next
= NEXT_INSN (last
);
3337 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3338 if (control_flow_insn_p (insn
))
3340 attach_libcall_retval_notes
= false;
3344 if (attach_libcall_retval_notes
)
3346 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3348 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
3394 /* This function is called when we are going to emit a compare instruction that
3395 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3397 *PMODE is the mode of the inputs (in case they are const_int).
3398 *PUNSIGNEDP nonzero says that the operands are unsigned;
3399 this matters if they need to be widened.
3401 If they have mode BLKmode, then SIZE specifies the size of both operands.
3403 This function performs all the setup necessary so that the caller only has
3404 to emit a single comparison insn. This setup can involve doing a BLKmode
3405 comparison or emitting a library call to perform the comparison if no insn
3406 is available to handle it.
3407 The values which are passed in through pointers can be modified; the caller
3408 should perform the comparison on the modified values. Constant
3409 comparisons must have already been folded. */
3412 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3413 enum machine_mode
*pmode
, int *punsignedp
,
3414 enum can_compare_purpose purpose
)
3416 enum machine_mode mode
= *pmode
;
3417 rtx x
= *px
, y
= *py
;
3418 int unsignedp
= *punsignedp
;
3419 enum mode_class
class;
3421 class = GET_MODE_CLASS (mode
);
3423 /* If we are inside an appropriately-short loop and we are optimizing,
3424 force expensive constants into a register. */
3425 if (CONSTANT_P (x
) && optimize
3426 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3427 x
= force_reg (mode
, x
);
3429 if (CONSTANT_P (y
) && optimize
3430 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3431 y
= force_reg (mode
, y
);
3434 /* Make sure if we have a canonical comparison. The RTL
3435 documentation states that canonical comparisons are required only
3436 for targets which have cc0. */
3437 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3440 /* Don't let both operands fail to indicate the mode. */
3441 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3442 x
= force_reg (mode
, x
);
3444 /* Handle all BLKmode compares. */
3446 if (mode
== BLKmode
)
3448 enum machine_mode cmp_mode
, result_mode
;
3449 enum insn_code cmp_code
;
3454 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3458 /* Try to use a memory block compare insn - either cmpstr
3459 or cmpmem will do. */
3460 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3461 cmp_mode
!= VOIDmode
;
3462 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3464 cmp_code
= cmpmem_optab
[cmp_mode
];
3465 if (cmp_code
== CODE_FOR_nothing
)
3466 cmp_code
= cmpstr_optab
[cmp_mode
];
3467 if (cmp_code
== CODE_FOR_nothing
)
3468 cmp_code
= cmpstrn_optab
[cmp_mode
];
3469 if (cmp_code
== CODE_FOR_nothing
)
3472 /* Must make sure the size fits the insn's mode. */
3473 if ((GET_CODE (size
) == CONST_INT
3474 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3475 || (GET_MODE_BITSIZE (GET_MODE (size
))
3476 > GET_MODE_BITSIZE (cmp_mode
)))
3479 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3480 result
= gen_reg_rtx (result_mode
);
3481 size
= convert_to_mode (cmp_mode
, size
, 1);
3482 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3486 *pmode
= result_mode
;
3490 /* Otherwise call a library function, memcmp. */
3491 libfunc
= memcmp_libfunc
;
3492 length_type
= sizetype
;
3493 result_mode
= TYPE_MODE (integer_type_node
);
3494 cmp_mode
= TYPE_MODE (length_type
);
3495 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3496 TYPE_UNSIGNED (length_type
));
3498 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3505 *pmode
= result_mode
;
3509 /* Don't allow operands to the compare to trap, as that can put the
3510 compare and branch in different basic blocks. */
3511 if (flag_non_call_exceptions
)
3514 x
= force_reg (mode
, x
);
3516 y
= force_reg (mode
, y
);
3521 if (can_compare_p (*pcomparison
, mode
, purpose
))
3524 /* Handle a lib call just for the mode we are using. */
3526 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3528 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3531 /* If we want unsigned, and this mode has a distinct unsigned
3532 comparison routine, use that. */
3533 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3534 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3536 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3537 word_mode
, 2, x
, mode
, y
, mode
);
3541 if (TARGET_LIB_INT_CMP_BIASED
)
3542 /* Integer comparison returns a result that must be compared
3543 against 1, so that even if we do an unsigned compare
3544 afterward, there is still a value that can represent the
3545 result "less than". */
3555 gcc_assert (class == MODE_FLOAT
);
3556 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
3582 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3583 we can do the comparison.
3584 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3585 be NULL_RTX which indicates that only a comparison is to be generated. */
3588 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3589 enum rtx_code comparison
, int unsignedp
, rtx label
)
3591 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3592 enum mode_class
class = GET_MODE_CLASS (mode
);
3593 enum machine_mode wider_mode
= mode
;
3595 /* Try combined insns first. */
3598 enum insn_code icode
;
3599 PUT_MODE (test
, wider_mode
);
3603 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3605 if (icode
!= CODE_FOR_nothing
3606 && insn_data
[icode
].operand
[0].predicate (test
, wider_mode
))
3608 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3609 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3610 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3615 /* Handle some compares against zero. */
3616 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3617 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3619 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3620 emit_insn (GEN_FCN (icode
) (x
));
3622 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3626 /* Handle compares for which there is a directly suitable insn. */
3628 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3629 if (icode
!= CODE_FOR_nothing
)
3631 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3632 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3633 emit_insn (GEN_FCN (icode
) (x
, y
));
3635 emit_jump_insn (bcc_gen_fctn
[(int) comparison
] (label
));
3639 if (class != MODE_INT
&& class != MODE_FLOAT
3640 && class != MODE_COMPLEX_FLOAT
)
3643 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3645 while (wider_mode
!= VOIDmode
);
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
	 is out of sync.  */
      gcc_assert (label);

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
		    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
	       enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
3708 /* Emit a library call comparison between floating point X and Y.
3709 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3712 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3713 enum machine_mode
*pmode
, int *punsignedp
)
3715 enum rtx_code comparison
= *pcomparison
;
3716 enum rtx_code swapped
= swap_condition (comparison
);
3717 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3720 enum machine_mode orig_mode
= GET_MODE (x
);
3721 enum machine_mode mode
;
3722 rtx value
, target
, insns
, equiv
;
3724 bool reversed_p
= false;
3726 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3728 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3731 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3734 tmp
= x
; x
= y
; y
= tmp
;
3735 comparison
= swapped
;
3739 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3740 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3742 comparison
= reversed
;
3748 gcc_assert (mode
!= VOIDmode
);
3750 if (mode
!= orig_mode
)
3752 x
= convert_to_mode (mode
, x
, 0);
3753 y
= convert_to_mode (mode
, y
, 0);
3756 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3757 the RTL. The allows the RTL optimizers to delete the libcall if the
3758 condition can be determined at compile-time. */
3759 if (comparison
== UNORDERED
)
3761 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3762 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3763 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3764 temp
, const_true_rtx
, equiv
);
3768 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3769 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3771 rtx true_rtx
, false_rtx
;
3776 true_rtx
= const0_rtx
;
3777 false_rtx
= const_true_rtx
;
3781 true_rtx
= const_true_rtx
;
3782 false_rtx
= const0_rtx
;
3786 true_rtx
= const1_rtx
;
3787 false_rtx
= const0_rtx
;
3791 true_rtx
= const0_rtx
;
3792 false_rtx
= constm1_rtx
;
3796 true_rtx
= constm1_rtx
;
3797 false_rtx
= const0_rtx
;
3801 true_rtx
= const0_rtx
;
3802 false_rtx
= const1_rtx
;
3808 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3809 equiv
, true_rtx
, false_rtx
);
3814 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3815 word_mode
, 2, x
, mode
, y
, mode
);
3816 insns
= get_insns ();
3819 target
= gen_reg_rtx (word_mode
);
3820 emit_libcall_block (insns
, target
, value
, equiv
);
3822 if (comparison
== UNORDERED
3823 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3824 comparison
= reversed_p
? EQ
: NE
;
3829 *pcomparison
= comparison
;
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
3846 #ifdef HAVE_conditional_move
3848 /* Emit a conditional move instruction if the machine supports one for that
3849 condition and machine mode.
3851 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3852 the mode to use should they be constants. If it is VOIDmode, they cannot
3855 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3856 should be stored there. MODE is the mode to use should they be constants.
3857 If it is VOIDmode, they cannot both be constants.
3859 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3860 is not supported. */
3863 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3864 enum machine_mode cmode
, rtx op2
, rtx op3
,
3865 enum machine_mode mode
, int unsignedp
)
3867 rtx tem
, subtarget
, comparison
, insn
;
3868 enum insn_code icode
;
3869 enum rtx_code reversed
;
3871 /* If one operand is constant, make it the second one. Only do this
3872 if the other operand is not constant as well. */
3874 if (swap_commutative_operands_p (op0
, op1
))
3879 code
= swap_condition (code
);
3882 /* get_condition will prefer to generate LT and GT even if the old
3883 comparison was against zero, so undo that canonicalization here since
3884 comparisons against zero are cheaper. */
3885 if (code
== LT
&& op1
== const1_rtx
)
3886 code
= LE
, op1
= const0_rtx
;
3887 else if (code
== GT
&& op1
== constm1_rtx
)
3888 code
= GE
, op1
= const0_rtx
;
3890 if (cmode
== VOIDmode
)
3891 cmode
= GET_MODE (op0
);
3893 if (swap_commutative_operands_p (op2
, op3
)
3894 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3903 if (mode
== VOIDmode
)
3904 mode
= GET_MODE (op2
);
3906 icode
= movcc_gen_code
[mode
];
3908 if (icode
== CODE_FOR_nothing
)
3912 target
= gen_reg_rtx (mode
);
3916 /* If the insn doesn't accept these operands, put them in pseudos. */
3918 if (!insn_data
[icode
].operand
[0].predicate
3919 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3920 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3922 if (!insn_data
[icode
].operand
[2].predicate
3923 (op2
, insn_data
[icode
].operand
[2].mode
))
3924 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3926 if (!insn_data
[icode
].operand
[3].predicate
3927 (op3
, insn_data
[icode
].operand
[3].mode
))
3928 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3930 /* Everything should now be in the suitable form, so emit the compare insn
3931 and then the conditional move. */
3934 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3936 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3937 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3938 return NULL and let the caller figure out how best to deal with this
3940 if (GET_CODE (comparison
) != code
)
3943 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3945 /* If that failed, then give up. */
3951 if (subtarget
!= target
)
3952 convert_move (target
, subtarget
, 0);
/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
3976 /* Emit a conditional addition instruction if the machine supports one for that
3977 condition and machine mode.
3979 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3980 the mode to use should they be constants. If it is VOIDmode, they cannot
3983 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3984 should be stored there. MODE is the mode to use should they be constants.
3985 If it is VOIDmode, they cannot both be constants.
3987 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3988 is not supported. */
3991 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3992 enum machine_mode cmode
, rtx op2
, rtx op3
,
3993 enum machine_mode mode
, int unsignedp
)
3995 rtx tem
, subtarget
, comparison
, insn
;
3996 enum insn_code icode
;
3997 enum rtx_code reversed
;
3999 /* If one operand is constant, make it the second one. Only do this
4000 if the other operand is not constant as well. */
4002 if (swap_commutative_operands_p (op0
, op1
))
4007 code
= swap_condition (code
);
4010 /* get_condition will prefer to generate LT and GT even if the old
4011 comparison was against zero, so undo that canonicalization here since
4012 comparisons against zero are cheaper. */
4013 if (code
== LT
&& op1
== const1_rtx
)
4014 code
= LE
, op1
= const0_rtx
;
4015 else if (code
== GT
&& op1
== constm1_rtx
)
4016 code
= GE
, op1
= const0_rtx
;
4018 if (cmode
== VOIDmode
)
4019 cmode
= GET_MODE (op0
);
4021 if (swap_commutative_operands_p (op2
, op3
)
4022 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4031 if (mode
== VOIDmode
)
4032 mode
= GET_MODE (op2
);
4034 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
4036 if (icode
== CODE_FOR_nothing
)
4040 target
= gen_reg_rtx (mode
);
4042 /* If the insn doesn't accept these operands, put them in pseudos. */
4044 if (!insn_data
[icode
].operand
[0].predicate
4045 (target
, insn_data
[icode
].operand
[0].mode
))
4046 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
4050 if (!insn_data
[icode
].operand
[2].predicate
4051 (op2
, insn_data
[icode
].operand
[2].mode
))
4052 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
4054 if (!insn_data
[icode
].operand
[3].predicate
4055 (op3
, insn_data
[icode
].operand
[3].mode
))
4056 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4058 /* Everything should now be in the suitable form, so emit the compare insn
4059 and then the conditional move. */
4062 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4064 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4065 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4066 return NULL and let the caller figure out how best to deal with this
4068 if (GET_CODE (comparison
) != code
)
4071 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4073 /* If that failed, then give up. */
4079 if (subtarget
!= target
)
4080 convert_move (target
, subtarget
, 0);
4085 /* These functions attempt to generate an insn body, rather than
4086 emitting the insn, but if the gen function already emits them, we
4087 make no attempt to turn them back into naked patterns. */
/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
4106 /* Generate and return an insn body to add r1 and c,
4107 storing the result in r0. */
4109 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4111 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4113 if (icode
== CODE_FOR_nothing
4114 || !(insn_data
[icode
].operand
[0].predicate
4115 (r0
, insn_data
[icode
].operand
[0].mode
))
4116 || !(insn_data
[icode
].operand
[1].predicate
4117 (r1
, insn_data
[icode
].operand
[1].mode
))
4118 || !(insn_data
[icode
].operand
[2].predicate
4119 (c
, insn_data
[icode
].operand
[2].mode
)))
4122 return GEN_FCN (icode
) (r0
, r1
, c
);
4126 have_add2_insn (rtx x
, rtx y
)
4130 gcc_assert (GET_MODE (x
) != VOIDmode
);
4132 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4134 if (icode
== CODE_FOR_nothing
)
4137 if (!(insn_data
[icode
].operand
[0].predicate
4138 (x
, insn_data
[icode
].operand
[0].mode
))
4139 || !(insn_data
[icode
].operand
[1].predicate
4140 (x
, insn_data
[icode
].operand
[1].mode
))
4141 || !(insn_data
[icode
].operand
[2].predicate
4142 (y
, insn_data
[icode
].operand
[2].mode
)))
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
	      (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
	      (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
	      (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
4165 /* Generate and return an insn body to subtract r1 and c,
4166 storing the result in r0. */
4168 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4170 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4172 if (icode
== CODE_FOR_nothing
4173 || !(insn_data
[icode
].operand
[0].predicate
4174 (r0
, insn_data
[icode
].operand
[0].mode
))
4175 || !(insn_data
[icode
].operand
[1].predicate
4176 (r1
, insn_data
[icode
].operand
[1].mode
))
4177 || !(insn_data
[icode
].operand
[2].predicate
4178 (c
, insn_data
[icode
].operand
[2].mode
)))
4181 return GEN_FCN (icode
) (r0
, r1
, c
);
4185 have_sub2_insn (rtx x
, rtx y
)
4189 gcc_assert (GET_MODE (x
) != VOIDmode
);
4191 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4193 if (icode
== CODE_FOR_nothing
)
4196 if (!(insn_data
[icode
].operand
[0].predicate
4197 (x
, insn_data
[icode
].operand
[0].mode
))
4198 || !(insn_data
[icode
].operand
[1].predicate
4199 (x
, insn_data
[icode
].operand
[1].mode
))
4200 || !(insn_data
[icode
].operand
[2].predicate
4201 (y
, insn_data
[icode
].operand
[2].mode
)))
4207 /* Generate the body of an instruction to copy Y into X.
4208 It may be a list of insns, if one insn isn't enough. */
4211 gen_move_insn (rtx x
, rtx y
)
4216 emit_move_insn_1 (x
, y
);
4222 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4223 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4224 no such operation exists, CODE_FOR_nothing will be returned. */
4227 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4231 #ifdef HAVE_ptr_extend
4233 return CODE_FOR_ptr_extend
;
4236 tab
= unsignedp
? zext_optab
: sext_optab
;
4237 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
		 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
4251 /* can_fix_p and can_float_p say whether the target machine
4252 can directly convert a given fixed point type to
4253 a given floating point type, or vice versa.
4254 The returned value is the CODE_FOR_... value to use,
4255 or CODE_FOR_nothing if these modes cannot be directly converted.
4257 *TRUNCP_PTR is set to 1 if it is necessary to output
4258 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4260 static enum insn_code
4261 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4262 int unsignedp
, int *truncp_ptr
)
4265 enum insn_code icode
;
4267 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4268 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4269 if (icode
!= CODE_FOR_nothing
)
4275 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4276 for this to work. We need to rework the fix* and ftrunc* patterns
4277 and documentation. */
4278 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4279 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4280 if (icode
!= CODE_FOR_nothing
4281 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4288 return CODE_FOR_nothing
;
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
	     int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
4301 /* Generate code to convert FROM to floating point
4302 and store in TO. FROM must be fixed point and not VOIDmode.
4303 UNSIGNEDP nonzero means regard FROM as unsigned.
4304 Normally this is done by correcting the final value
4305 if it is negative. */
4308 expand_float (rtx to
, rtx from
, int unsignedp
)
4310 enum insn_code icode
;
4312 enum machine_mode fmode
, imode
;
4313 bool can_do_signed
= false;
4315 /* Crash now, because we won't be able to decide which mode to use. */
4316 gcc_assert (GET_MODE (from
) != VOIDmode
);
4318 /* Look for an insn to do the conversion. Do it in the specified
4319 modes if possible; otherwise convert either input, output or both to
4320 wider mode. If the integer mode is wider than the mode of FROM,
4321 we can do the conversion signed even if the input is unsigned. */
4323 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4324 fmode
= GET_MODE_WIDER_MODE (fmode
))
4325 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4326 imode
= GET_MODE_WIDER_MODE (imode
))
4328 int doing_unsigned
= unsignedp
;
4330 if (fmode
!= GET_MODE (to
)
4331 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4334 icode
= can_float_p (fmode
, imode
, unsignedp
);
4335 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4337 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4338 if (scode
!= CODE_FOR_nothing
)
4339 can_do_signed
= true;
4340 if (imode
!= GET_MODE (from
))
4341 icode
= scode
, doing_unsigned
= 0;
4344 if (icode
!= CODE_FOR_nothing
)
4346 if (imode
!= GET_MODE (from
))
4347 from
= convert_to_mode (imode
, from
, unsignedp
);
4349 if (fmode
!= GET_MODE (to
))
4350 target
= gen_reg_rtx (fmode
);
4352 emit_unop_insn (icode
, target
, from
,
4353 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4356 convert_move (to
, target
, 0);
4361 /* Unsigned integer, and no way to convert directly.
4362 Convert as signed, then conditionally adjust the result. */
4363 if (unsignedp
&& can_do_signed
)
4365 rtx label
= gen_label_rtx ();
4367 REAL_VALUE_TYPE offset
;
4369 /* Look for a usable floating mode FMODE wider than the source and at
4370 least as wide as the target. Using FMODE will avoid rounding woes
4371 with unsigned values greater than the signed maximum value. */
4373 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4374 fmode
= GET_MODE_WIDER_MODE (fmode
))
4375 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4376 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4379 if (fmode
== VOIDmode
)
4381 /* There is no such mode. Pretend the target is wide enough. */
4382 fmode
= GET_MODE (to
);
4384 /* Avoid double-rounding when TO is narrower than FROM. */
4385 if ((significand_size (fmode
) + 1)
4386 < GET_MODE_BITSIZE (GET_MODE (from
)))
4389 rtx neglabel
= gen_label_rtx ();
4391 /* Don't use TARGET if it isn't a register, is a hard register,
4392 or is the wrong mode. */
4394 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4395 || GET_MODE (target
) != fmode
)
4396 target
= gen_reg_rtx (fmode
);
4398 imode
= GET_MODE (from
);
4399 do_pending_stack_adjust ();
4401 /* Test whether the sign bit is set. */
4402 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4405 /* The sign bit is not set. Convert as signed. */
4406 expand_float (target
, from
, 0);
4407 emit_jump_insn (gen_jump (label
));
4410 /* The sign bit is set.
4411 Convert to a usable (positive signed) value by shifting right
4412 one bit, while remembering if a nonzero bit was shifted
4413 out; i.e., compute (from & 1) | (from >> 1). */
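	  /* Illustrative example (not generated code): for a 32-bit FROM
	     equal to 0xFFFFFFFF, from >> 1 is 0x7FFFFFFF and from & 1 is 1;
	     the OR keeps the discarded low bit as "sticky" information, so
	     converting the halved value and doubling it below rounds the
	     same way a direct unsigned conversion would have.  */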
4415 emit_label (neglabel
);
4416 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4417 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4418 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4420 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4422 expand_float (target
, temp
, 0);
4424 /* Multiply by 2 to undo the shift above. */
4425 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4426 target
, 0, OPTAB_LIB_WIDEN
);
4428 emit_move_insn (target
, temp
);
4430 do_pending_stack_adjust ();
4436 /* If we are about to do some arithmetic to correct for an
4437 unsigned operand, do it in a pseudo-register. */
4439 if (GET_MODE (to
) != fmode
4440 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4441 target
= gen_reg_rtx (fmode
);
4443 /* Convert as signed integer to floating. */
4444 expand_float (target
, from
, 0);
4446 /* If FROM is negative (and therefore TO is negative),
4447 correct its value by 2**bitwidth. */
4449 do_pending_stack_adjust ();
4450 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4454 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4455 temp
= expand_binop (fmode
, add_optab
, target
,
4456 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4457 target
, 0, OPTAB_LIB_WIDEN
);
4459 emit_move_insn (target
, temp
);
4461 do_pending_stack_adjust ();
4466 /* No hardware instruction available; call a library routine. */
4471 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4473 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4474 from
= convert_to_mode (SImode
, from
, unsignedp
);
4476 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4477 gcc_assert (libfunc
);
4481 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4482 GET_MODE (to
), 1, from
,
4484 insns
= get_insns ();
4487 emit_libcall_block (insns
, target
, value
,
4488 gen_rtx_FLOAT (GET_MODE (to
), from
));
4493 /* Copy result to requested destination
4494 if we have been computing in a temp location. */
4498 if (GET_MODE (target
) == GET_MODE (to
))
4499 emit_move_insn (to
, target
);
4501 convert_move (to
, target
, 0);
4505 /* Generate code to convert FROM to fixed point and store in TO. FROM
4506 must be floating point. */
4509 expand_fix (rtx to
, rtx from
, int unsignedp
)
4511 enum insn_code icode
;
4513 enum machine_mode fmode
, imode
;
4516 /* We first try to find a pair of modes, one real and one integer, at
4517 least as wide as FROM and TO, respectively, in which we can open-code
4518 this conversion. If the integer mode is wider than the mode of TO,
4519 we can do the conversion either signed or unsigned. */
4521 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4522 fmode
= GET_MODE_WIDER_MODE (fmode
))
4523 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4524 imode
= GET_MODE_WIDER_MODE (imode
))
4526 int doing_unsigned
= unsignedp
;
4528 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4529 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4530 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4532 if (icode
!= CODE_FOR_nothing
)
4534 if (fmode
!= GET_MODE (from
))
4535 from
= convert_to_mode (fmode
, from
, 0);
4539 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4540 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4544 if (imode
!= GET_MODE (to
))
4545 target
= gen_reg_rtx (imode
);
4547 emit_unop_insn (icode
, target
, from
,
4548 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4550 convert_move (to
, target
, unsignedp
);
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range
     2^63..2^64-1 inclusive (for other inputs, overflow happens and the
     result is undefined).  So we know that the most significant bit set in
     the mantissa corresponds to 2^63.  The subtraction of 2^63 should not
     generate any rounding as it simply clears out that bit.  The rest is
     trivial.  */
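/* A worked example (illustration only), assuming a 64-bit destination:
   for an input equal to 2^63 + 5 the comparison against the limit 2^63
   takes the fixup path; (2^63 + 5) - 2^63 == 5 converts exactly, and
   XORing the converted value with 2^63 (a bit known to be clear in it)
   restores 2^63 + 5.  */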
4577 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4578 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4579 fmode
= GET_MODE_WIDER_MODE (fmode
))
4580 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4584 REAL_VALUE_TYPE offset
;
4585 rtx limit
, lab1
, lab2
, insn
;
4587 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4588 real_2expN (&offset
, bitsize
- 1);
4589 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4590 lab1
= gen_label_rtx ();
4591 lab2
= gen_label_rtx ();
4593 if (fmode
!= GET_MODE (from
))
4594 from
= convert_to_mode (fmode
, from
, 0);
4596 /* See if we need to do the subtraction. */
4597 do_pending_stack_adjust ();
4598 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4601 /* If not, do the signed "fix" and branch around fixup code. */
4602 expand_fix (to
, from
, 0);
4603 emit_jump_insn (gen_jump (lab2
));
4606 /* Otherwise, subtract 2**(N-1), convert to signed number,
4607 then add 2**(N-1). Do the addition using XOR since this
4608 will often generate better code. */
4610 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4611 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4612 expand_fix (to
, target
, 0);
4613 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4615 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4617 to
, 1, OPTAB_LIB_WIDEN
);
4620 emit_move_insn (to
, target
);
4624 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4625 != CODE_FOR_nothing
)
4627 /* Make a place for a REG_NOTE and add it. */
4628 insn
= emit_move_insn (to
, to
);
4629 set_unique_reg_note (insn
,
4631 gen_rtx_fmt_e (UNSIGNED_FIX
,
4639 /* We can't do it with an insn, so use a library call. But first ensure
4640 that the mode of TO is at least as wide as SImode, since those are the
4641 only library calls we know about. */
4643 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4645 target
= gen_reg_rtx (SImode
);
4647 expand_fix (target
, from
, unsignedp
);
4655 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4656 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4657 gcc_assert (libfunc
);
4661 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4662 GET_MODE (to
), 1, from
,
4664 insns
= get_insns ();
4667 emit_libcall_block (insns
, target
, value
,
4668 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4669 GET_MODE (to
), from
));
4674 if (GET_MODE (to
) == GET_MODE (target
))
4675 emit_move_insn (to
, target
);
4677 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
	  && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing));
}
4691 /* Create a blank optab. */
4696 optab op
= ggc_alloc (sizeof (struct optab
));
4697 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4699 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4700 op
->handlers
[i
].libfunc
= 0;
4706 static convert_optab
4707 new_convert_optab (void)
4710 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4711 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4712 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4714 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4715 op
->handlers
[i
][j
].libfunc
= 0;
4720 /* Same, but fill in its code as CODE, and write it into the
4721 code_to_optab table. */
4723 init_optab (enum rtx_code code
)
4725 optab op
= new_optab ();
4727 code_to_optab
[(int) code
] = op
;
4731 /* Same, but fill in its code as CODE, and do _not_ write it into
4732 the code_to_optab table. */
4734 init_optabv (enum rtx_code code
)
4736 optab op
= new_optab ();
4741 /* Conversion optabs never go in the code_to_optab table. */
4742 static inline convert_optab
4743 init_convert_optab (enum rtx_code code
)
4745 convert_optab op
= new_convert_optab ();
4750 /* Initialize the libfunc fields of an entire group of entries in some
4751 optab. Each entry is set equal to a string consisting of a leading
4752 pair of underscores followed by a generic operation name followed by
4753 a mode name (downshifted to lowercase) followed by a single character
4754 representing the number of operands for the given operation (which is
4755 usually one of the characters '2', '3', or '4').
4757 OPTABLE is the table in which libfunc fields are to be initialized.
4758 FIRST_MODE is the first machine mode index in the given optab to
4760 LAST_MODE is the last machine mode index in the given optab to
4762 OPNAME is the generic (string) name of the operation.
4763 SUFFIX is the character which specifies the number of operands for
the given generic operation.  */
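/* For example, with OPNAME "add" and SUFFIX '3' the SImode entry receives
   the usual libgcc name "__addsi3" and the DImode entry "__adddi3"; this
   is given only as an illustration of the naming rule described above.  */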
4768 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4769 const char *opname
, int suffix
)
4772 unsigned opname_len
= strlen (opname
);
4774 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4775 mode
= (enum machine_mode
) ((int) mode
+ 1))
4777 const char *mname
= GET_MODE_NAME (mode
);
4778 unsigned mname_len
= strlen (mname
);
4779 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4786 for (q
= opname
; *q
; )
4788 for (q
= mname
; *q
; q
++)
4789 *p
++ = TOLOWER (*q
);
4793 optable
->handlers
[(int) mode
].libfunc
4794 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4798 /* Initialize the libfunc fields of an entire group of entries in some
4799 optab which correspond to all integer mode operations. The parameters
4800 have the same meaning as similarly named ones for the `init_libfuncs'
4801 routine. (See above). */
4804 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4806 int maxsize
= 2*BITS_PER_WORD
;
4807 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4808 maxsize
= LONG_LONG_TYPE_SIZE
;
4809 init_libfuncs (optable
, word_mode
,
4810 mode_for_size (maxsize
, MODE_INT
, 0),
4814 /* Initialize the libfunc fields of an entire group of entries in some
4815 optab which correspond to all real mode operations. The parameters
4816 have the same meaning as similarly named ones for the `init_libfuncs'
4817 routine. (See above). */
4820 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4822 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4825 /* Initialize the libfunc fields of an entire group of entries of an
4826 inter-mode-class conversion optab. The string formation rules are
4827 similar to the ones for init_libfuncs, above, but instead of having
4828 a mode name and an operand count these functions have two mode names
4829 and no operand count. */
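/* For instance, the libcall converting SImode integers to DFmode floats
   is named "__floatsidf" and the one converting DFmode floats to SImode
   integers is "__fixdfsi"; these are given purely as examples of the
   naming rule above.  */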
4831 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4832 enum mode_class from_class
,
4833 enum mode_class to_class
)
4835 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4836 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4837 size_t opname_len
= strlen (opname
);
4838 size_t max_mname_len
= 0;
4840 enum machine_mode fmode
, tmode
;
4841 const char *fname
, *tname
;
4843 char *libfunc_name
, *suffix
;
4846 for (fmode
= first_from_mode
;
4848 fmode
= GET_MODE_WIDER_MODE (fmode
))
4849 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4851 for (tmode
= first_to_mode
;
4853 tmode
= GET_MODE_WIDER_MODE (tmode
))
4854 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4856 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4857 libfunc_name
[0] = '_';
4858 libfunc_name
[1] = '_';
4859 memcpy (&libfunc_name
[2], opname
, opname_len
);
4860 suffix
= libfunc_name
+ opname_len
+ 2;
4862 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4863 fmode
= GET_MODE_WIDER_MODE (fmode
))
4864 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4865 tmode
= GET_MODE_WIDER_MODE (tmode
))
4867 fname
= GET_MODE_NAME (fmode
);
4868 tname
= GET_MODE_NAME (tmode
);
4871 for (q
= fname
; *q
; p
++, q
++)
4873 for (q
= tname
; *q
; p
++, q
++)
4878 tab
->handlers
[tmode
][fmode
].libfunc
4879 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4884 /* Initialize the libfunc fields of an entire group of entries of an
4885 intra-mode-class conversion optab. The string formation rules are
4886 similar to the ones for init_libfunc, above. WIDENING says whether
4887 the optab goes from narrow to wide modes or vice versa. These functions
4888 have two mode names _and_ an operand count. */
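/* For instance, the widening SFmode -> DFmode conversion uses the name
   "__extendsfdf2" and the narrowing DFmode -> SFmode conversion uses
   "__truncdfsf2"; again, purely illustrative of the rule above.  */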
4890 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4891 enum mode_class
class, bool widening
)
4893 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4894 size_t opname_len
= strlen (opname
);
4895 size_t max_mname_len
= 0;
4897 enum machine_mode nmode
, wmode
;
4898 const char *nname
, *wname
;
4900 char *libfunc_name
, *suffix
;
4903 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4904 nmode
= GET_MODE_WIDER_MODE (nmode
))
4905 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4907 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4908 libfunc_name
[0] = '_';
4909 libfunc_name
[1] = '_';
4910 memcpy (&libfunc_name
[2], opname
, opname_len
);
4911 suffix
= libfunc_name
+ opname_len
+ 2;
4913 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4914 nmode
= GET_MODE_WIDER_MODE (nmode
))
4915 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4916 wmode
= GET_MODE_WIDER_MODE (wmode
))
4918 nname
= GET_MODE_NAME (nmode
);
4919 wname
= GET_MODE_NAME (wmode
);
4922 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4924 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4930 tab
->handlers
[widening
? wmode
: nmode
]
4931 [widening
? nmode
: wmode
].libfunc
4932 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4939 init_one_libfunc (const char *name
)
4943 /* Create a FUNCTION_DECL that can be passed to
4944 targetm.encode_section_info. */
4945 /* ??? We don't have any type information except for this is
4946 a function. Pretend this is "int foo()". */
4947 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4948 build_function_type (integer_type_node
, NULL_TREE
));
4949 DECL_ARTIFICIAL (decl
) = 1;
4950 DECL_EXTERNAL (decl
) = 1;
4951 TREE_PUBLIC (decl
) = 1;
4953 symbol
= XEXP (DECL_RTL (decl
), 0);
4955 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4956 are the flags assigned by targetm.encode_section_info. */
4957 SYMBOL_REF_DECL (symbol
) = 0;
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
		  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
4986 /* Call this once to initialize the contents of the optabs
4987 appropriately for the current target machine. */
4994 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4996 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4997 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4999 #ifdef HAVE_conditional_move
5000 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5001 movcc_gen_code
[i
] = CODE_FOR_nothing
;
5004 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5006 vcond_gen_code
[i
] = CODE_FOR_nothing
;
5007 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
5010 add_optab
= init_optab (PLUS
);
5011 addv_optab
= init_optabv (PLUS
);
5012 sub_optab
= init_optab (MINUS
);
5013 subv_optab
= init_optabv (MINUS
);
5014 smul_optab
= init_optab (MULT
);
5015 smulv_optab
= init_optabv (MULT
);
5016 smul_highpart_optab
= init_optab (UNKNOWN
);
5017 umul_highpart_optab
= init_optab (UNKNOWN
);
5018 smul_widen_optab
= init_optab (UNKNOWN
);
5019 umul_widen_optab
= init_optab (UNKNOWN
);
5020 usmul_widen_optab
= init_optab (UNKNOWN
);
5021 sdiv_optab
= init_optab (DIV
);
5022 sdivv_optab
= init_optabv (DIV
);
5023 sdivmod_optab
= init_optab (UNKNOWN
);
5024 udiv_optab
= init_optab (UDIV
);
5025 udivmod_optab
= init_optab (UNKNOWN
);
5026 smod_optab
= init_optab (MOD
);
5027 umod_optab
= init_optab (UMOD
);
5028 fmod_optab
= init_optab (UNKNOWN
);
5029 drem_optab
= init_optab (UNKNOWN
);
5030 ftrunc_optab
= init_optab (UNKNOWN
);
5031 and_optab
= init_optab (AND
);
5032 ior_optab
= init_optab (IOR
);
5033 xor_optab
= init_optab (XOR
);
5034 ashl_optab
= init_optab (ASHIFT
);
5035 ashr_optab
= init_optab (ASHIFTRT
);
5036 lshr_optab
= init_optab (LSHIFTRT
);
5037 rotl_optab
= init_optab (ROTATE
);
5038 rotr_optab
= init_optab (ROTATERT
);
5039 smin_optab
= init_optab (SMIN
);
5040 smax_optab
= init_optab (SMAX
);
5041 umin_optab
= init_optab (UMIN
);
5042 umax_optab
= init_optab (UMAX
);
5043 pow_optab
= init_optab (UNKNOWN
);
5044 atan2_optab
= init_optab (UNKNOWN
);
5046 /* These three have codes assigned exclusively for the sake of
5048 mov_optab
= init_optab (SET
);
5049 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5050 cmp_optab
= init_optab (COMPARE
);
5052 ucmp_optab
= init_optab (UNKNOWN
);
5053 tst_optab
= init_optab (UNKNOWN
);
5055 eq_optab
= init_optab (EQ
);
5056 ne_optab
= init_optab (NE
);
5057 gt_optab
= init_optab (GT
);
5058 ge_optab
= init_optab (GE
);
5059 lt_optab
= init_optab (LT
);
5060 le_optab
= init_optab (LE
);
5061 unord_optab
= init_optab (UNORDERED
);
5063 neg_optab
= init_optab (NEG
);
5064 negv_optab
= init_optabv (NEG
);
5065 abs_optab
= init_optab (ABS
);
5066 absv_optab
= init_optabv (ABS
);
5067 addcc_optab
= init_optab (UNKNOWN
);
5068 one_cmpl_optab
= init_optab (NOT
);
5069 ffs_optab
= init_optab (FFS
);
5070 clz_optab
= init_optab (CLZ
);
5071 ctz_optab
= init_optab (CTZ
);
5072 popcount_optab
= init_optab (POPCOUNT
);
5073 parity_optab
= init_optab (PARITY
);
5074 sqrt_optab
= init_optab (SQRT
);
5075 floor_optab
= init_optab (UNKNOWN
);
5076 lfloor_optab
= init_optab (UNKNOWN
);
5077 ceil_optab
= init_optab (UNKNOWN
);
5078 lceil_optab
= init_optab (UNKNOWN
);
5079 round_optab
= init_optab (UNKNOWN
);
5080 btrunc_optab
= init_optab (UNKNOWN
);
5081 nearbyint_optab
= init_optab (UNKNOWN
);
5082 rint_optab
= init_optab (UNKNOWN
);
5083 lrint_optab
= init_optab (UNKNOWN
);
5084 sincos_optab
= init_optab (UNKNOWN
);
5085 sin_optab
= init_optab (UNKNOWN
);
5086 asin_optab
= init_optab (UNKNOWN
);
5087 cos_optab
= init_optab (UNKNOWN
);
5088 acos_optab
= init_optab (UNKNOWN
);
5089 exp_optab
= init_optab (UNKNOWN
);
5090 exp10_optab
= init_optab (UNKNOWN
);
5091 exp2_optab
= init_optab (UNKNOWN
);
5092 expm1_optab
= init_optab (UNKNOWN
);
5093 ldexp_optab
= init_optab (UNKNOWN
);
5094 logb_optab
= init_optab (UNKNOWN
);
5095 ilogb_optab
= init_optab (UNKNOWN
);
5096 log_optab
= init_optab (UNKNOWN
);
5097 log10_optab
= init_optab (UNKNOWN
);
5098 log2_optab
= init_optab (UNKNOWN
);
5099 log1p_optab
= init_optab (UNKNOWN
);
5100 tan_optab
= init_optab (UNKNOWN
);
5101 atan_optab
= init_optab (UNKNOWN
);
5102 copysign_optab
= init_optab (UNKNOWN
);
5104 strlen_optab
= init_optab (UNKNOWN
);
5105 cbranch_optab
= init_optab (UNKNOWN
);
5106 cmov_optab
= init_optab (UNKNOWN
);
5107 cstore_optab
= init_optab (UNKNOWN
);
5108 push_optab
= init_optab (UNKNOWN
);
5110 reduc_smax_optab
= init_optab (UNKNOWN
);
5111 reduc_umax_optab
= init_optab (UNKNOWN
);
5112 reduc_smin_optab
= init_optab (UNKNOWN
);
5113 reduc_umin_optab
= init_optab (UNKNOWN
);
5114 reduc_splus_optab
= init_optab (UNKNOWN
);
5115 reduc_uplus_optab
= init_optab (UNKNOWN
);
5117 vec_extract_optab
= init_optab (UNKNOWN
);
5118 vec_set_optab
= init_optab (UNKNOWN
);
5119 vec_init_optab
= init_optab (UNKNOWN
);
5120 vec_shl_optab
= init_optab (UNKNOWN
);
5121 vec_shr_optab
= init_optab (UNKNOWN
);
5122 vec_realign_load_optab
= init_optab (UNKNOWN
);
5123 movmisalign_optab
= init_optab (UNKNOWN
);
5125 powi_optab
= init_optab (UNKNOWN
);
5128 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5129 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5130 trunc_optab
= init_convert_optab (TRUNCATE
);
5131 sfix_optab
= init_convert_optab (FIX
);
5132 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5133 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5134 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5135 sfloat_optab
= init_convert_optab (FLOAT
);
5136 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5138 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5140 movmem_optab
[i
] = CODE_FOR_nothing
;
5141 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5142 cmpstrn_optab
[i
] = CODE_FOR_nothing
;
5143 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5144 setmem_optab
[i
] = CODE_FOR_nothing
;
5146 sync_add_optab
[i
] = CODE_FOR_nothing
;
5147 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5148 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5149 sync_and_optab
[i
] = CODE_FOR_nothing
;
5150 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5151 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5152 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5153 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5154 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5155 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5156 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5157 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5158 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5159 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5160 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5161 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5162 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5163 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5164 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5165 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5166 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5167 sync_lock_release
[i
] = CODE_FOR_nothing
;
5169 #ifdef HAVE_SECONDARY_RELOADS
5170 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5174 /* Fill in the optabs with the insns we support. */
5177 /* Initialize the optabs with the names of the library functions. */
5178 init_integral_libfuncs (add_optab
, "add", '3');
5179 init_floating_libfuncs (add_optab
, "add", '3');
5180 init_integral_libfuncs (addv_optab
, "addv", '3');
5181 init_floating_libfuncs (addv_optab
, "add", '3');
5182 init_integral_libfuncs (sub_optab
, "sub", '3');
5183 init_floating_libfuncs (sub_optab
, "sub", '3');
5184 init_integral_libfuncs (subv_optab
, "subv", '3');
5185 init_floating_libfuncs (subv_optab
, "sub", '3');
5186 init_integral_libfuncs (smul_optab
, "mul", '3');
5187 init_floating_libfuncs (smul_optab
, "mul", '3');
5188 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5189 init_floating_libfuncs (smulv_optab
, "mul", '3');
5190 init_integral_libfuncs (sdiv_optab
, "div", '3');
5191 init_floating_libfuncs (sdiv_optab
, "div", '3');
5192 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5193 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5194 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5195 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5196 init_integral_libfuncs (smod_optab
, "mod", '3');
5197 init_integral_libfuncs (umod_optab
, "umod", '3');
5198 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5199 init_integral_libfuncs (and_optab
, "and", '3');
5200 init_integral_libfuncs (ior_optab
, "ior", '3');
5201 init_integral_libfuncs (xor_optab
, "xor", '3');
5202 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5203 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5204 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5205 init_integral_libfuncs (smin_optab
, "min", '3');
5206 init_floating_libfuncs (smin_optab
, "min", '3');
5207 init_integral_libfuncs (smax_optab
, "max", '3');
5208 init_floating_libfuncs (smax_optab
, "max", '3');
5209 init_integral_libfuncs (umin_optab
, "umin", '3');
5210 init_integral_libfuncs (umax_optab
, "umax", '3');
5211 init_integral_libfuncs (neg_optab
, "neg", '2');
5212 init_floating_libfuncs (neg_optab
, "neg", '2');
5213 init_integral_libfuncs (negv_optab
, "negv", '2');
5214 init_floating_libfuncs (negv_optab
, "neg", '2');
5215 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5216 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5217 init_integral_libfuncs (clz_optab
, "clz", '2');
5218 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5219 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5220 init_integral_libfuncs (parity_optab
, "parity", '2');
5222 /* Comparison libcalls for integers MUST come in pairs,
5224 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5225 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5226 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5228 /* EQ etc are floating point only. */
5229 init_floating_libfuncs (eq_optab
, "eq", '2');
5230 init_floating_libfuncs (ne_optab
, "ne", '2');
5231 init_floating_libfuncs (gt_optab
, "gt", '2');
5232 init_floating_libfuncs (ge_optab
, "ge", '2');
5233 init_floating_libfuncs (lt_optab
, "lt", '2');
5234 init_floating_libfuncs (le_optab
, "le", '2');
5235 init_floating_libfuncs (unord_optab
, "unord", '2');
5237 init_floating_libfuncs (powi_optab
, "powi", '2');
5240 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5241 MODE_INT
, MODE_FLOAT
);
5242 init_interclass_conv_libfuncs (ufloat_optab
, "floatun",
5243 MODE_INT
, MODE_FLOAT
);
5244 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5245 MODE_FLOAT
, MODE_INT
);
5246 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5247 MODE_FLOAT
, MODE_INT
);
5249 /* sext_optab is also used for FLOAT_EXTEND. */
5250 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5251 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5253 /* Use cabs for double complex abs, since systems generally have cabs.
5254 Don't define any libcall for float complex, so that cabs will be used. */
5255 if (complex_double_type_node
)
5256 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5257 = init_one_libfunc ("cabs");
5259 /* The ffs function operates on `int'. */
5260 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5261 = init_one_libfunc ("ffs");
5263 abort_libfunc
= init_one_libfunc ("abort");
5264 memcpy_libfunc
= init_one_libfunc ("memcpy");
5265 memmove_libfunc
= init_one_libfunc ("memmove");
5266 memcmp_libfunc
= init_one_libfunc ("memcmp");
5267 memset_libfunc
= init_one_libfunc ("memset");
5268 setbits_libfunc
= init_one_libfunc ("__setbits");
5270 #ifndef DONT_USE_BUILTIN_SETJMP
5271 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5272 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5274 setjmp_libfunc
= init_one_libfunc ("setjmp");
5275 longjmp_libfunc
= init_one_libfunc ("longjmp");
5277 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5278 unwind_sjlj_unregister_libfunc
5279 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5281 /* For function entry/exit instrumentation. */
5282 profile_function_entry_libfunc
5283 = init_one_libfunc ("__cyg_profile_func_enter");
5284 profile_function_exit_libfunc
5285 = init_one_libfunc ("__cyg_profile_func_exit");
5287 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5289 if (HAVE_conditional_trap
)
5290 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5292 /* Allow the target to add more libcalls or rename some, etc. */
5293 targetm
.init_libfuncs ();
5298 /* Print information about the current contents of the optabs on
5302 debug_optab_libfuncs (void)
5308 /* Dump the arithmetic optabs. */
5309 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5310 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5313 struct optab_handlers
*h
;
5316 h
= &o
->handlers
[j
];
	gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5320 fprintf (stderr
, "%s\t%s:\t%s\n",
5321 GET_RTX_NAME (o
->code
),
5323 XSTR (h
->libfunc
, 0));
5327 /* Dump the conversion optabs. */
5328 for (i
= 0; i
< (int) COI_MAX
; ++i
)
5329 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5330 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5333 struct optab_handlers
*h
;
5335 o
= &convert_optab_table
[i
];
5336 h
= &o
->handlers
[j
][k
];
	  gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5340 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5341 GET_RTX_NAME (o
->code
),
5344 XSTR (h
->libfunc
, 0));
5352 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5353 CODE. Return 0 on failure. */
5356 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5357 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5359 enum machine_mode mode
= GET_MODE (op1
);
5360 enum insn_code icode
;
5363 if (!HAVE_conditional_trap
)
5366 if (mode
== VOIDmode
)
5369 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5370 if (icode
== CODE_FOR_nothing
)
5374 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5375 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5381 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5383 PUT_CODE (trap_rtx
, code
);
5384 gcc_assert (HAVE_conditional_trap
);
5385 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5389 insn
= get_insns ();
5396 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5397 or unsigned operation code. */
5399 static enum rtx_code
5400 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5412 code
= unsignedp
? LTU
: LT
;
5415 code
= unsignedp
? LEU
: LE
;
5418 code
= unsignedp
? GTU
: GT
;
5421 code
= unsignedp
? GEU
: GE
;
5424 case UNORDERED_EXPR
:
5455 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5456 unsigned operators. Do not generate compare instruction. */
5459 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5461 enum rtx_code rcode
;
5463 rtx rtx_op0
, rtx_op1
;
  /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
     ensures that the condition is a relational operation.  */
5467 gcc_assert (COMPARISON_CLASS_P (cond
));
5469 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5470 t_op0
= TREE_OPERAND (cond
, 0);
5471 t_op1
= TREE_OPERAND (cond
, 1);
5473 /* Expand operands. */
5474 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5475 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5477 if (!insn_data
[icode
].operand
[4].predicate (rtx_op0
, GET_MODE (rtx_op0
))
5478 && GET_MODE (rtx_op0
) != VOIDmode
)
5479 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5481 if (!insn_data
[icode
].operand
[5].predicate (rtx_op1
, GET_MODE (rtx_op1
))
5482 && GET_MODE (rtx_op1
) != VOIDmode
)
5483 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5485 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5488 /* Return insn code for VEC_COND_EXPR EXPR. */
5490 static inline enum insn_code
5491 get_vcond_icode (tree expr
, enum machine_mode mode
)
5493 enum insn_code icode
= CODE_FOR_nothing
;
5495 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5496 icode
= vcondu_gen_code
[mode
];
5498 icode
= vcond_gen_code
[mode
];
/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in mode VMODE.  */
5506 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5508 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5513 /* Generate insns for VEC_COND_EXPR. */
5516 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5518 enum insn_code icode
;
5519 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5520 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5521 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5523 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5524 if (icode
== CODE_FOR_nothing
)
5527 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5528 target
= gen_reg_rtx (mode
);
5530 /* Get comparison rtx. First expand both cond expr operands. */
5531 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5533 cc_op0
= XEXP (comparison
, 0);
5534 cc_op1
= XEXP (comparison
, 1);
5535 /* Expand both operands and force them in reg, if required. */
5536 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5537 NULL_RTX
, VOIDmode
, 1);
5538 if (!insn_data
[icode
].operand
[1].predicate (rtx_op1
, mode
)
5539 && mode
!= VOIDmode
)
5540 rtx_op1
= force_reg (mode
, rtx_op1
);
5542 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5543 NULL_RTX
, VOIDmode
, 1);
5544 if (!insn_data
[icode
].operand
[2].predicate (rtx_op2
, mode
)
5545 && mode
!= VOIDmode
)
5546 rtx_op2
= force_reg (mode
, rtx_op2
);
5548 /* Emit instruction! */
5549 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5550 comparison
, cc_op0
, cc_op1
));
5556 /* This is an internal subroutine of the other compare_and_swap expanders.
5557 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5558 operation. TARGET is an optional place to store the value result of
5559 the operation. ICODE is the particular instruction to expand. Return
5560 the result of the operation. */
5563 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5564 rtx target
, enum insn_code icode
)
5566 enum machine_mode mode
= GET_MODE (mem
);
5569 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5570 target
= gen_reg_rtx (mode
);
5572 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5573 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5574 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5575 old_val
= force_reg (mode
, old_val
);
5577 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5578 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5579 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5580 new_val
= force_reg (mode
, new_val
);
5582 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5583 if (insn
== NULL_RTX
)
5590 /* Expand a compare-and-swap operation and return its value. */
5593 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5595 enum machine_mode mode
= GET_MODE (mem
);
5596 enum insn_code icode
= sync_compare_and_swap
[mode
];
5598 if (icode
== CODE_FOR_nothing
)
5601 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
5604 /* Expand a compare-and-swap operation and store true into the result if
5605 the operation was successful and false otherwise. Return the result.
5606 Unlike other routines, TARGET is not optional. */
5609 expand_bool_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5611 enum machine_mode mode
= GET_MODE (mem
);
5612 enum insn_code icode
;
5613 rtx subtarget
, label0
, label1
;
5615 /* If the target supports a compare-and-swap pattern that simultaneously
5616 sets some flag for success, then use it. Otherwise use the regular
5617 compare-and-swap and follow that immediately with a compare insn. */
5618 icode
= sync_compare_and_swap_cc
[mode
];
5622 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5624 if (subtarget
!= NULL_RTX
)
5628 case CODE_FOR_nothing
:
5629 icode
= sync_compare_and_swap
[mode
];
5630 if (icode
== CODE_FOR_nothing
)
      /* Ensure that if old_val == mem, we're not comparing
	 against a stale value.  */
5635 if (MEM_P (old_val
))
5636 old_val
= force_reg (mode
, old_val
);
5638 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5640 if (subtarget
== NULL_RTX
)
5643 emit_cmp_insn (subtarget
, old_val
, EQ
, const0_rtx
, mode
, true);
5646 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5647 setcc instruction from the beginning. We don't work too hard here,
5648 but it's nice to not be stupid about initial code gen either. */
5649 if (STORE_FLAG_VALUE
== 1)
5651 icode
= setcc_gen_code
[EQ
];
5652 if (icode
!= CODE_FOR_nothing
)
5654 enum machine_mode cmode
= insn_data
[icode
].operand
[0].mode
;
5658 if (!insn_data
[icode
].operand
[0].predicate (target
, cmode
))
5659 subtarget
= gen_reg_rtx (cmode
);
5661 insn
= GEN_FCN (icode
) (subtarget
);
5665 if (GET_MODE (target
) != GET_MODE (subtarget
))
5667 convert_move (target
, subtarget
, 1);
5675 /* Without an appropriate setcc instruction, use a set of branches to
5676 get 1 and 0 stored into target. Presumably if the target has a
5677 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5679 label0
= gen_label_rtx ();
5680 label1
= gen_label_rtx ();
5682 emit_jump_insn (bcc_gen_fctn
[EQ
] (label0
));
5683 emit_move_insn (target
, const0_rtx
);
5684 emit_jump_insn (gen_jump (label1
));
5686 emit_label (label0
);
5687 emit_move_insn (target
, const1_rtx
);
5688 emit_label (label1
);
5693 /* This is a helper function for the other atomic operations. This function
5694 emits a loop that contains SEQ that iterates until a compare-and-swap
5695 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5696 a set of instructions that takes a value from OLD_REG as an input and
5697 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5698 set to the current contents of MEM. After SEQ, a compare-and-swap will
5699 attempt to update MEM with NEW_REG. The function returns true when the
5700 loop was generated successfully. */
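/* A minimal sketch of a caller (see expand_sync_operation below): SEQ is
   typically built with start_sequence ()/get_insns ()/end_sequence () and
   computes something like NEW_REG = OLD_REG + VAL; this helper then wraps
   that sequence in the retry loop described in the comment below.  */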
5703 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5705 enum machine_mode mode
= GET_MODE (mem
);
5706 enum insn_code icode
;
5707 rtx label
, cmp_reg
, subtarget
;
  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */
5722 label
= gen_label_rtx ();
5723 cmp_reg
= gen_reg_rtx (mode
);
5725 emit_move_insn (cmp_reg
, mem
);
5727 emit_move_insn (old_reg
, cmp_reg
);
5731 /* If the target supports a compare-and-swap pattern that simultaneously
5732 sets some flag for success, then use it. Otherwise use the regular
5733 compare-and-swap and follow that immediately with a compare insn. */
5734 icode
= sync_compare_and_swap_cc
[mode
];
5738 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5740 if (subtarget
!= NULL_RTX
)
5742 gcc_assert (subtarget
== cmp_reg
);
5747 case CODE_FOR_nothing
:
5748 icode
= sync_compare_and_swap
[mode
];
5749 if (icode
== CODE_FOR_nothing
)
5752 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5754 if (subtarget
== NULL_RTX
)
5756 if (subtarget
!= cmp_reg
)
5757 emit_move_insn (cmp_reg
, subtarget
);
5759 emit_cmp_insn (cmp_reg
, old_reg
, EQ
, const0_rtx
, mode
, true);
5762 /* ??? Mark this jump predicted not taken? */
5763 emit_jump_insn (bcc_gen_fctn
[NE
] (label
));
5768 /* This function generates the atomic operation MEM CODE= VAL. In this
5769 case, we do not care about any resulting value. Returns NULL if we
5770 cannot generate the operation. */
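/* For example (illustration only), expanding the builtin
   __sync_fetch_and_add (&x, 1) when its result is unused reaches this
   function with CODE == PLUS, and either the direct sync_add pattern or
   the compare-and-swap loop below is used.  */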
5773 expand_sync_operation (rtx mem
, rtx val
, enum rtx_code code
)
5775 enum machine_mode mode
= GET_MODE (mem
);
5776 enum insn_code icode
;
5779 /* Look to see if the target supports the operation directly. */
5783 icode
= sync_add_optab
[mode
];
5786 icode
= sync_ior_optab
[mode
];
5789 icode
= sync_xor_optab
[mode
];
5792 icode
= sync_and_optab
[mode
];
5795 icode
= sync_nand_optab
[mode
];
5799 icode
= sync_sub_optab
[mode
];
5800 if (icode
== CODE_FOR_nothing
)
5802 icode
= sync_add_optab
[mode
];
5803 if (icode
!= CODE_FOR_nothing
)
5805 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
5815 /* Generate the direct operation, if present. */
5816 if (icode
!= CODE_FOR_nothing
)
5818 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5819 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5820 if (!insn_data
[icode
].operand
[1].predicate (val
, mode
))
5821 val
= force_reg (mode
, val
);
5823 insn
= GEN_FCN (icode
) (mem
, val
);
5831 /* Failing that, generate a compare-and-swap loop in which we perform the
5832 operation with normal arithmetic instructions. */
5833 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5835 rtx t0
= gen_reg_rtx (mode
), t1
;
5842 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5845 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5846 true, OPTAB_LIB_WIDEN
);
5848 insn
= get_insns ();
5851 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
5858 /* This function generates the atomic operation MEM CODE= VAL. In this
5859 case, we do care about the resulting value: if AFTER is true then
5860 return the value MEM holds after the operation, if AFTER is false
5861 then return the value MEM holds before the operation. TARGET is an
5862 optional place for the result value to be stored. */
5865 expand_sync_fetch_operation (rtx mem
, rtx val
, enum rtx_code code
,
5866 bool after
, rtx target
)
5868 enum machine_mode mode
= GET_MODE (mem
);
5869 enum insn_code old_code
, new_code
, icode
;
5873 /* Look to see if the target supports the operation directly. */
5877 old_code
= sync_old_add_optab
[mode
];
5878 new_code
= sync_new_add_optab
[mode
];
5881 old_code
= sync_old_ior_optab
[mode
];
5882 new_code
= sync_new_ior_optab
[mode
];
5885 old_code
= sync_old_xor_optab
[mode
];
5886 new_code
= sync_new_xor_optab
[mode
];
5889 old_code
= sync_old_and_optab
[mode
];
5890 new_code
= sync_new_and_optab
[mode
];
5893 old_code
= sync_old_nand_optab
[mode
];
5894 new_code
= sync_new_nand_optab
[mode
];
5898 old_code
= sync_old_sub_optab
[mode
];
5899 new_code
= sync_new_sub_optab
[mode
];
5900 if (old_code
== CODE_FOR_nothing
&& new_code
== CODE_FOR_nothing
)
5902 old_code
= sync_old_add_optab
[mode
];
5903 new_code
= sync_new_add_optab
[mode
];
5904 if (old_code
!= CODE_FOR_nothing
|| new_code
!= CODE_FOR_nothing
)
5906 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, we
     can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, we can only handle
     this if the operation is reversible.  */
5926 if (icode
== CODE_FOR_nothing
)
5929 if (icode
!= CODE_FOR_nothing
)
5936 if (icode
== CODE_FOR_nothing
5937 && (code
== PLUS
|| code
== MINUS
|| code
== XOR
))
5940 if (icode
!= CODE_FOR_nothing
)
5945 /* If we found something supported, great. */
5946 if (icode
!= CODE_FOR_nothing
)
5948 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5949 target
= gen_reg_rtx (mode
);
5951 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5952 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5953 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5954 val
= force_reg (mode
, val
);
5956 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5961 /* If we need to compensate for using an operation with the
5962 wrong return value, do so now. */
5969 else if (code
== MINUS
)
5974 target
= expand_simple_unop (mode
, NOT
, target
, NULL_RTX
, true);
5975 target
= expand_simple_binop (mode
, code
, target
, val
, NULL_RTX
,
5976 true, OPTAB_LIB_WIDEN
);
5983 /* Failing that, generate a compare-and-swap loop in which we perform the
5984 operation with normal arithmetic instructions. */
5985 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5987 rtx t0
= gen_reg_rtx (mode
), t1
;
5989 if (!target
|| !register_operand (target
, mode
))
5990 target
= gen_reg_rtx (mode
);
5995 emit_move_insn (target
, t0
);
5999 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
6002 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
6003 true, OPTAB_LIB_WIDEN
);
6005 emit_move_insn (target
, t1
);
6007 insn
= get_insns ();
6010 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will be 0/1, but the exact value
   stored in MEM is target-defined.  TARGET is an optional place to stick
   the return value.  */
6025 expand_sync_lock_test_and_set (rtx mem
, rtx val
, rtx target
)
6027 enum machine_mode mode
= GET_MODE (mem
);
6028 enum insn_code icode
;
6031 /* If the target supports the test-and-set directly, great. */
6032 icode
= sync_lock_test_and_set
[mode
];
6033 if (icode
!= CODE_FOR_nothing
)
6035 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
6036 target
= gen_reg_rtx (mode
);
6038 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6039 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6040 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
6041 val
= force_reg (mode
, val
);
6043 insn
= GEN_FCN (icode
) (target
, mem
, val
);
6051 /* Otherwise, use a compare-and-swap loop for the exchange. */
6052 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
6054 if (!target
|| !register_operand (target
, mode
))
6055 target
= gen_reg_rtx (mode
);
6056 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6057 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6058 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6065 #include "gt-optabs.h"