/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
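
/* Illustrative sketch (not part of the original source): why NO_EXTEND is
   safe for operations whose low-order result bits depend only on the
   low-order input bits.  Widening 8-bit operands into 32-bit values whose
   high bits are garbage, operating, and truncating back to 8 bits gives the
   same answer as the narrow operation; a right shift would not, which is
   why callers must not request NO_EXTEND for one.  The helper names below
   are made up, and the block is kept out of the build with #if 0.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint8_t add8_via_wide (uint8_t a, uint8_t b)
{
  uint32_t wa = 0xdeadbe00u | a;     /* high-order bits deliberately garbage */
  uint32_t wb = 0xcafef000u | b;
  return (uint8_t) (wa + wb);        /* truncation discards the garbage */
}

static void no_extend_example (void)
{
  assert (add8_via_wide (200, 100) == (uint8_t) (200 + 100));  /* both are 44 */
}
#endif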
/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */

optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;

  switch (code)
    {
    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case TRUNC_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    default:
      break;
    }

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return NULL;
    }
}
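
/* Illustrative sketch (not part of the original source): the check that the
   trapping variants selected above (addv_optab and friends) stand for when
   -ftrapv is in effect.  A signed addition overflows exactly when both
   operands have the same sign and the result's sign differs; the trapping
   expansion raises a trap in that case instead of silently wrapping.  The
   helper below is plain C with a made-up name, kept out of the build.  */
#if 0
#include <stdint.h>
#include <stdlib.h>

static int32_t add_trapping (int32_t a, int32_t b)
{
  uint32_t ua = (uint32_t) a, ub = (uint32_t) b;
  uint32_t ur = ua + ub;                        /* wraps; well defined on unsigned */

  if (~(ua ^ ub) & (ua ^ ur) & 0x80000000u)     /* same-sign inputs, different-sign result */
    abort ();                                   /* stand-in for the trap insn */
  return (int32_t) ur;
}
#endif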
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
  emit_insn (pat);
  return temp;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);
  else
    return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

static bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
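
/* Illustrative sketch (not part of the original source): the same
   superword/subword split written as plain C for a 64-bit left shift built
   from 32-bit word operations, with little-endian word order and a shift
   count assumed to be in [0, 63].  The (x >> 1) >> (31 - count) form is the
   "shift by one, then by the remainder" trick used above to avoid an
   undefined shift by exactly 32 bits.  Struct and function names are made
   up; the block is kept out of the build with #if 0.  */
#if 0
#include <stdint.h>

struct dword { uint32_t lo, hi; };

static struct dword shl_double (struct dword x, unsigned int count)
{
  struct dword r;

  if (count >= 32)
    {                           /* superword case: low word shifts straight into the high word */
      r.hi = x.lo << (count - 32);
      r.lo = 0;
    }
  else
    {                           /* subword case: combine both words */
      uint32_t carries = (x.lo >> 1) >> (31 - count);  /* bits spilling out of the low word */
      r.hi = (x.hi << count) | carries;
      r.lo = x.lo << count;
    }
  return r;
}
#endif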
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
                         _______________________
                        [__op0_high_|__op0_low__]
                         _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________
                         _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
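
/* Illustrative sketch (not part of the original source): the three-multiply
   scheme above written as plain C for the low 64 bits of a product computed
   from 32-bit words.  The first helper assumes an unsigned widening multiply
   is available; the second shows the adjustment used when only a signed
   widening multiply exists - the top bit of each low word is added into its
   own operand's high word before the cross products are formed, exactly as
   derived in the comment above.  All names are made up; the block is kept
   out of the build with #if 0.  */
#if 0
#include <stdint.h>

static uint64_t mul_double_umul (uint32_t op0_low, uint32_t op0_high,
                                 uint32_t op1_low, uint32_t op1_high)
{
  /* (2a) + (2b): only their low 32 bits matter, so plain multiplies do.  */
  uint32_t adjust = op0_high * op1_low + op1_high * op0_low;
  uint64_t product = (uint64_t) op0_low * op1_low;      /* (1), widening */
  return product + ((uint64_t) adjust << 32);
}

static uint64_t mul_double_smul (uint32_t op0_low, uint32_t op0_high,
                                 uint32_t op1_low, uint32_t op1_high)
{
  /* Pre-adjust each high word by the top bit of its own low word, then use
     a signed widening multiply for (1).  */
  uint32_t adjust = (op0_high + (op0_low >> 31)) * op1_low
                    + (op1_high + (op1_low >> 31)) * op0_low;
  uint64_t product = (uint64_t) ((int64_t) (int32_t) op0_low
                                 * (int32_t) op1_low);
  return product + ((uint64_t) adjust << 32);
}
#endif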
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);
  /* Load duplicate non-volatile operands once.  */
  if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
    {
      op0 = force_not_mem (op0);
      op1 = op0;
    }
  else
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (((target == 0 || REG_P (target))
           ? ((REG_P (op1)
               && !REG_P (op0))
              || target == op1)
           : rtx_equal_p (op1, target))
          || GET_CODE (op0) == CONST_INT)
        {
          rtx tmp = op0;
          op0 = op1;
          op1 = tmp;
        }
    }
  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT)
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT)
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns, equiv_value;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target. Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
          else
            equiv_value = 0;

          /* We can't make this a no conflict block if this is a word swap,
             because the word swap case fails if the input and output values
             are in the same register.  */
          if (shift_count != BITS_PER_WORD)
            emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          else
            emit_insn (insns);

          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LTU : GTU),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LTU : GTU),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two poss. carry together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }
1612 Use a library call if one is available and caller says that's ok. */
1614 if (binoptab
->handlers
[(int) mode
].libfunc
1615 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1619 enum machine_mode op1_mode
= mode
;
1626 op1_mode
= word_mode
;
1627 /* Specify unsigned here,
1628 since negative shift counts are meaningless. */
1629 op1x
= convert_to_mode (word_mode
, op1
, 1);
1632 if (GET_MODE (op0
) != VOIDmode
1633 && GET_MODE (op0
) != mode
)
1634 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1636 /* Pass 1 for NO_QUEUE so we don't lose any increments
1637 if the libcall is cse'd or moved. */
1638 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1639 NULL_RTX
, LCT_CONST
, mode
, 2,
1640 op0
, mode
, op1x
, op1_mode
);
1642 insns
= get_insns ();
1645 target
= gen_reg_rtx (mode
);
1646 emit_libcall_block (insns
, target
, value
,
1647 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1652 delete_insns_since (last
);
1654 /* It can't be done in this mode. Can we do it in a wider mode? */
1656 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1657 || methods
== OPTAB_MUST_WIDEN
))
1659 /* Caller says, don't even try. */
1660 delete_insns_since (entry_last
);
1664 /* Compute the value of METHODS to pass to recursive calls.
1665 Don't allow widening to be tried recursively. */
1667 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1669 /* Look for a wider mode of the same class for which it appears we can do
1672 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1674 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1675 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1677 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1678 != CODE_FOR_nothing
)
1679 || (methods
== OPTAB_LIB
1680 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1682 rtx xop0
= op0
, xop1
= op1
;
1685 /* For certain integer operations, we need not actually extend
1686 the narrow operands, as long as we will truncate
1687 the results to the same narrowness. */
1689 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1690 || binoptab
== xor_optab
1691 || binoptab
== add_optab
|| binoptab
== sub_optab
1692 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1693 && class == MODE_INT
)
1696 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1697 unsignedp
, no_extend
);
1699 /* The second operand of a shift must always be extended. */
1700 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1701 no_extend
&& binoptab
!= ashl_optab
);
1703 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1704 unsignedp
, methods
);
1707 if (class != MODE_INT
)
1710 target
= gen_reg_rtx (mode
);
1711 convert_move (target
, temp
, 0);
1715 return gen_lowpart (mode
, temp
);
1718 delete_insns_since (last
);
1723 delete_insns_since (entry_last
);
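
/* Illustrative sketch (not part of the original source): the carry
   propagation used by the add/subtract-a-word-at-a-time case above, written
   as plain C for an addition.  Each result word is op0[i] + op1[i] plus the
   carry from the previous word; the carry out is "main add wrapped" OR
   "adding the carry-in wrapped", which is what the two
   emit_store_flag_force calls and the ior of the two possible carries
   compute.  Names are made up; the block is kept out of the build.  */
#if 0
#include <stdint.h>

static void add_multiword (uint32_t *res, const uint32_t *op0,
                           const uint32_t *op1, unsigned int nwords)
{
  uint32_t carry_in = 0;
  unsigned int i;

  for (i = 0; i < nwords; i++)          /* least significant word first */
    {
      uint32_t x = op0[i] + op1[i];
      uint32_t carry_out = x < op0[i];  /* main add wrapped around */
      uint32_t newx = x + carry_in;

      carry_out |= newx < x;            /* adding the carry wrapped around */
      res[i] = newx;
      carry_in = carry_out;
    }
}
#endif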
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
                   rtx op0, rtx op1, rtx target, int unsignedp,
                   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
                       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use.  */
  wide_soptab = *soptab;
  wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
  wide_soptab.handlers[(int) mode].libfunc = 0;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
                         unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width lib call if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a lib call, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
                       unsignedp, methods);
  if (temp != 0)
    return temp;
  return expand_binop (mode, uoptab, op0, op1, target,
                       unsignedp, methods);
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = force_not_mem (op0);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  op0 = force_not_mem (op0);
  op1 = force_not_mem (op1);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
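/* Concretely: the libcall result is obtained in an integral mode with
   2 * GET_MODE_BITSIZE (MODE) bits, and simplify_gen_subreg below then
   picks out the half that the caller asked for -- the subword at byte
   offset 0 when TARG0 is requested, or the one at byte offset
   GET_MODE_SIZE (MODE) when TARG1 is requested.  */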
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                    NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];

  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).  */
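/* For example, counting leading zeros of an 8-bit QImode value via a
   32-bit SImode clz: the zero-extended operand has 24 known-zero high
   bits, so clz:SI (zero_extend:SI x) is exactly 24 more than the QImode
   count, and subtracting 32 - 8 recovers the narrow result.  */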
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
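/* For example, the parity of 0x6c (four bits set) is 0: popcount gives 4
   and (and 4 1) is 0.  Widening by zero extension is safe here because
   it does not change the number of set bits.  */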
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      enum machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
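/* For the common IEEE single format, for instance, the sign occupies bit
   31 of the 32-bit image, so ABS is an AND with 0x7fffffff and NEG is an
   XOR with 0x80000000, both applied to the integer view of the operand.  */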
static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_double_const (lo, hi, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_double_const (lo, hi, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
                           gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx last = get_last_insn ();
  rtx pat;

  class = GET_MODE_CLASS (mode);

  if (flag_force_mem)
    op0 = force_not_mem (op0);

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
        return temp;
      else
        goto try_libcall;
    }

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
          {
            rtx xop0 = op0;

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (class != MODE_INT)
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,
                                             copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (class == MODE_FLOAT)
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == popcount_optab || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, mode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)
            {
              rtx xop0 = op0;

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  */
              if (unoptab == clz_optab && temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
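  /* Worked example for W = 32 and x = -5: the arithmetic shift gives
     -1 (all ones), (-1 ^ -5) is 4, and 4 - (-1) is 5.  For x >= 0 the
     shift gives 0 and the expression reduces to x itself.  */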
  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);

  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on CSE to optimize constant cases.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (GE, mode, ccp_jump))
    do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
                                  NULL_RTX, op1);
  else
    do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                             NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);

  return target;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word;
  rtx label;

  if (target == op1)
    target = NULL_RTX;

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      op1 = gen_lowpart (imode, op1);
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      op1 = operand_subword_force (op1, word, mode);
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  op1 = expand_binop (imode, and_optab, op1,
                      immed_double_const (lo, hi, imode),
                      NULL_RTX, 1, OPTAB_LIB_WIDEN);

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
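/* In other words, the result is assembled as
   (op0 & ~signmask) | (op1 & signmask) on the integer image of the
   operands, done either word by word or on the whole value at once;
   the first AND is omitted when OP0_IS_ABS is already known true.  */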
static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece = expand_binop (imode, and_optab, op0_piece,
                                          immed_double_const (~lo, ~hi, imode),
                                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_double_const (lo, hi, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_double_const (lo, hi, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_double_const (~lo, ~hi, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */
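/* copysign (X, Y) is |X| with the sign of Y; for example
   copysign (3.0, -0.5) is -3.0 and copysign (-3.0, 2.0) is 3.0.  */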
rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
          || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
              && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Sign and zero extension from memory is often done specially on
     RISC machines, so forcing into a register here can pessimize
     code.  */
  if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
    op0 = force_not_mem (op0);

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
      || (flag_force_mem && MEM_P (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block.  Set P->must_stay
   if the currently examined clobber / store has to stay in the list of
   insns that constitute the actual no_conflict block.  */
static void
no_conflict_move_test (rtx dest, rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_mentioned_p (dest, PATTERN (p->first))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */
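/* For a two-word TARGET the emitted block therefore looks roughly like

        (clobber TARGET)
        (set (subword 0 of TARGET) ...)   ; carries REG_NO_CONFLICT notes
        (set (subword 1 of TARGET) ...)   ; carries REG_NO_CONFLICT notes
        (set TARGET TARGET)               ; REG_RETVAL, optional REG_EQUAL

   with a REG_LIBCALL note on the first insn pointing at the last.  */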
rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
          || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
        return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and remove
     these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions on their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
        {
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;
          else
            insns = next;

          if (next)
            PREV_INSN (next) = PREV_INSN (insn);

          add_insn (insn);
        }
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      if (op1 && REG_P (op1))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
                                              REG_NOTES (insn));

      if (op0 && REG_P (op0))
        REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
                                              REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      last = emit_move_insn (target, target);
      if (equiv)
        set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         alleged libcall value when found together with the REG_RETVAL
         note added below.  An existing note can come from an insn
         expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
                                         REG_NOTES (first));
  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));

  return last;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */
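/* A typical use is visible in expand_unop above: the libcall insns are
   collected with start_sequence ()/get_insns (), and emit_libcall_block
   then re-emits them followed by a move of RESULT into TARGET carrying
   a REG_EQUAL note for EQUIV.  */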
void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

            if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
              remove_note (insn, note);
          }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
        {
          rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

          if (note != 0)
            XEXP (note, 0) = constm1_rtx;
          else
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
                                                  REG_NOTES (insn));
        }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions on their own.  We must
         avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
        remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
        remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
          && (insn == insns
              || ((! INSN_P(insns)
                   || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
                  && ! reg_used_between_p (SET_DEST (set), insns, insn)
                  && ! modified_in_p (SET_SRC (set), insns)
                  && ! modified_between_p (SET_SRC (set), insns, insn))))
        {
          if (PREV_INSN (insn))
            NEXT_INSN (PREV_INSN (insn)) = next;
          else
            insns = next;

          if (next)
            PREV_INSN (next) = PREV_INSN (insn);

          add_insn (insn);
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
         be mistaken for a note referring to the full contents of the
         libcall value when found together with the REG_RETVAL note added
         below.  An existing note can come from an insn expansion at
         "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Encapsulate the block so it gets manipulated as a unit.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes
         when the encapsulated region would not be in one basic block,
         i.e. when there is a control_flow_insn_p insn between FIRST
         and LAST.  */
      bool attach_libcall_retval_notes = true;
      next = NEXT_INSN (last);
      for (insn = first; insn != next; insn = NEXT_INSN (insn))
        if (control_flow_insn_p (insn))
          {
            attach_libcall_retval_notes = false;
            break;
          }

      if (attach_libcall_retval_notes)
        {
          REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
                                                 REG_NOTES (first));
          REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
                                                REG_NOTES (last));
        }
    }
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
               enum can_compare_purpose purpose)
{
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        {
          if (purpose == ccp_jump)
            return bcc_gen_fctn[(int) code] != NULL;
          else if (purpose == ccp_store_flag)
            return setcc_gen_code[(int) code] != CODE_FOR_nothing;
          else
            /* There's only one cmov entry point, and it's allowed to fail.  */
            return 1;
        }
      if (purpose == ccp_jump
          && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_cmov
          && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      if (purpose == ccp_store_flag
          && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */
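/* In this file the usual pattern is the one in emit_cmp_and_jump_insns
   below: prepare_cmp_insn massages the operands (and possibly *PMODE and
   *PCOMPARISON), and emit_cmp_and_jump_insn_1 then emits the single
   compare (and branch) insn on the adjusted values.  */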
static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
                  enum machine_mode *pmode, int *punsignedp,
                  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;
  enum mode_class class;

  class = GET_MODE_CLASS (mode);

  if (mode != BLKmode && flag_force_mem)
    {
      /* Load duplicate non-volatile operands once.  */
      if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
        {
          x = force_not_mem (x);
          y = x;
        }
      else
        {
          x = force_not_mem (x);
          y = force_not_mem (y);
        }
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = cmpmem_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = cmpstr_optab[cmp_mode];
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((GET_CODE (size) == CONST_INT
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *px = result;
          *py = const0_rtx;
          *pmode = result_mode;
          return;
        }

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
                                        result_mode, 3,
                                        XEXP (x, 0), Pmode,
                                        XEXP (y, 0), Pmode,
                                        size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
        libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
                                        word_mode, 2, x, mode, y, mode);

      *px = result;
      *pmode = word_mode;
      if (TARGET_LIB_INT_CMP_BIASED)
        /* Integer comparison returns a result that must be compared
           against 1, so that even if we do an unsigned compare
           afterward, there is still a value that can represent the
           result "less than".  */
        *py = const1_rtx;
      else
        {
          *py = const0_rtx;
          *punsignedp = 1;
        }
      return;
    }

  gcc_assert (class == MODE_FLOAT);
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
                 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
                          enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
        {
          icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

          if (icode != CODE_FOR_nothing
              && insn_data[icode].operand[0].predicate (test, wider_mode))
            {
              x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
              y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
              emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
              return;
            }
        }

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
        {
          x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
          y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
          emit_insn (GEN_FCN (icode) (x, y));
          if (label)
            emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
          return;
        }

      if (class != MODE_INT && class != MODE_FLOAT
          && class != MODE_COMPLEX_FLOAT)
        break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  gcc_unreachable ();
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
   the proper branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
   be passed unchanged to emit_cmp_insn, then potentially converted into an
   unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         enum machine_mode mode, int unsignedp, rtx label)
{
  rtx op0 = x, op1 = y;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y))
    {
      /* If we're not emitting a branch, this means some caller
         is out of sync.  */
      gcc_assert (label);

      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

#ifdef HAVE_cc0
  /* If OP0 is still a constant, then both X and Y must be constants.
     Force X into a register to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);
#endif

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
                    ccp_jump);
  emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
}
/* Like emit_cmp_and_jump_insns, but generate only the comparison.  */

void
emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
               enum machine_mode mode, int unsignedp)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
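/* When no compare instruction handles the mode, this turns, say, an
   SFmode LT comparison into a call to the lt_optab libfunc (typically
   __ltsf2 in libgcc); the word_mode result is then compared against
   zero by the caller, using *PCOMPARISON as adjusted below.  */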
static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
                       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
        break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
        {
          rtx tmp;
          tmp = x; x = y; y = tmp;
          comparison = swapped;
          break;
        }

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
          && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        {
          rtx true_rtx, false_rtx;

          switch (comparison)
            {
            case EQ:
              true_rtx = const0_rtx;
              false_rtx = const_true_rtx;
              break;

            case NE:
              true_rtx = const_true_rtx;
              false_rtx = const0_rtx;
              break;

            case GT:
              true_rtx = const1_rtx;
              false_rtx = const0_rtx;
              break;

            case GE:
              true_rtx = const0_rtx;
              false_rtx = constm1_rtx;
              break;

            case LT:
              true_rtx = constm1_rtx;
              false_rtx = const0_rtx;
              break;

            case LE:
              true_rtx = const0_rtx;
              false_rtx = const1_rtx;
              break;

            default:
              gcc_unreachable ();
            }
          equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
                                        equiv, true_rtx, false_rtx);
        }
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}
#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
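/* For example, a caller expanding MAX (a, b) into a conditional move
   might use

        emit_conditional_move (target, GT, a, b, mode, a, b, mode, 0);

   which yields target = (a > b) ? a : b when a movcc pattern exists.  */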
rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (flag_force_mem)
    {
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);
    }

  if (!target)
    target = gen_reg_rtx (mode);

  subtarget = target;

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

int
can_conditionally_move_p (enum machine_mode mode)
{
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */
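/* For example, a conditional increment x = (a < b) ? x : x + 1 might
   map to

        emit_conditional_add (x, LT, a, b, mode, x, const1_rtx, mode, 0);

   since TARGET receives OP2 when the comparison holds and OP2 + OP3
   otherwise.  */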
rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (flag_force_mem)
    {
      op2 = force_not_mem (op2);
      op3 = force_not_mem (op3);
    }

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
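/* Note on the three add helpers above (descriptive, not a call site):
   gen_add2_insn (x, y) builds the body for "x += y" and asserts that the
   operands are already acceptable; gen_add3_insn (r0, r1, c) builds
   "r0 = r1 + c" but returns NULL_RTX instead of asserting, so its callers
   must be prepared for failure; have_add2_insn (x, y) only reports whether
   such an insn exists without generating anything.  */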
/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}
/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}
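/* Usage sketch (illustrative only): to widen a QImode value into an SImode
   register with zero extension, a caller could do

     if (can_extend_p (SImode, QImode, 1) != CODE_FOR_nothing)
       emit_insn (gen_extend_insn (si_reg, qi_reg, SImode, QImode, 1));

   using the generator defined just below; si_reg and qi_reg are
   hypothetical pseudos of the given modes.  Otherwise the caller has to
   fall back to convert_move.  */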
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC patterns
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}
static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}
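/* Availability-check sketch (illustrative only): expand_fix below probes
   each candidate mode pair with a call such as

     icode = can_fix_p (DImode, DFmode, 0, &must_trunc);

   A CODE_FOR_nothing answer means it must widen one of the modes or fall
   back to a library call, and MUST_TRUNC tells it whether an explicit
   FTRUNC insn has to be emitted before the fix insn.  */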
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */
4272 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4273 fmode
= GET_MODE_WIDER_MODE (fmode
))
4274 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4275 imode
= GET_MODE_WIDER_MODE (imode
))
4277 int doing_unsigned
= unsignedp
;
4279 if (fmode
!= GET_MODE (to
)
4280 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4283 icode
= can_float_p (fmode
, imode
, unsignedp
);
4284 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4285 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4287 if (icode
!= CODE_FOR_nothing
)
4289 if (imode
!= GET_MODE (from
))
4290 from
= convert_to_mode (imode
, from
, unsignedp
);
4292 if (fmode
!= GET_MODE (to
))
4293 target
= gen_reg_rtx (fmode
);
4295 emit_unop_insn (icode
, target
, from
,
4296 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4299 convert_move (to
, target
, 0);
4304 /* Unsigned integer, and no way to convert directly.
4305 Convert as signed, then conditionally adjust the result. */
4308 rtx label
= gen_label_rtx ();
4310 REAL_VALUE_TYPE offset
;
4313 from
= force_not_mem (from
);
4315 /* Look for a usable floating mode FMODE wider than the source and at
4316 least as wide as the target. Using FMODE will avoid rounding woes
4317 with unsigned values greater than the signed maximum value. */
4319 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4320 fmode
= GET_MODE_WIDER_MODE (fmode
))
4321 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4322 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4325 if (fmode
== VOIDmode
)
4327 /* There is no such mode. Pretend the target is wide enough. */
4328 fmode
= GET_MODE (to
);
4330 /* Avoid double-rounding when TO is narrower than FROM. */
4331 if ((significand_size (fmode
) + 1)
4332 < GET_MODE_BITSIZE (GET_MODE (from
)))
4335 rtx neglabel
= gen_label_rtx ();
4337 /* Don't use TARGET if it isn't a register, is a hard register,
4338 or is the wrong mode. */
4340 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4341 || GET_MODE (target
) != fmode
)
4342 target
= gen_reg_rtx (fmode
);
4344 imode
= GET_MODE (from
);
4345 do_pending_stack_adjust ();
4347 /* Test whether the sign bit is set. */
4348 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4351 /* The sign bit is not set. Convert as signed. */
4352 expand_float (target
, from
, 0);
4353 emit_jump_insn (gen_jump (label
));
4356 /* The sign bit is set.
4357 Convert to a usable (positive signed) value by shifting right
4358 one bit, while remembering if a nonzero bit was shifted
4359 out; i.e., compute (from & 1) | (from >> 1). */
4361 emit_label (neglabel
);
4362 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4363 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4364 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4366 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4368 expand_float (target
, temp
, 0);
4370 /* Multiply by 2 to undo the shift above. */
4371 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4372 target
, 0, OPTAB_LIB_WIDEN
);
4374 emit_move_insn (target
, temp
);
4376 do_pending_stack_adjust ();
4382 /* If we are about to do some arithmetic to correct for an
4383 unsigned operand, do it in a pseudo-register. */
4385 if (GET_MODE (to
) != fmode
4386 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4387 target
= gen_reg_rtx (fmode
);
4389 /* Convert as signed integer to floating. */
4390 expand_float (target
, from
, 0);
4392 /* If FROM is negative (and therefore TO is negative),
4393 correct its value by 2**bitwidth. */
4395 do_pending_stack_adjust ();
4396 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4400 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4401 temp
= expand_binop (fmode
, add_optab
, target
,
4402 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4403 target
, 0, OPTAB_LIB_WIDEN
);
4405 emit_move_insn (target
, temp
);
4407 do_pending_stack_adjust ();
4412 /* No hardware instruction available; call a library routine. */
4417 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4419 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4420 from
= convert_to_mode (SImode
, from
, unsignedp
);
4423 from
= force_not_mem (from
);
4425 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4426 gcc_assert (libfunc
);
4430 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4431 GET_MODE (to
), 1, from
,
4433 insns
= get_insns ();
4436 emit_libcall_block (insns
, target
, value
,
4437 gen_rtx_FLOAT (GET_MODE (to
), from
));
4442 /* Copy result to requested destination
4443 if we have been computing in a temp location. */
4447 if (GET_MODE (target
) == GET_MODE (to
))
4448 emit_move_insn (to
, target
);
4450 convert_move (to
, target
, 0);
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */
4470 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4471 fmode
= GET_MODE_WIDER_MODE (fmode
))
4472 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4473 imode
= GET_MODE_WIDER_MODE (imode
))
4475 int doing_unsigned
= unsignedp
;
4477 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4478 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4479 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4481 if (icode
!= CODE_FOR_nothing
)
4483 if (fmode
!= GET_MODE (from
))
4484 from
= convert_to_mode (fmode
, from
, 0);
4488 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4489 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4493 if (imode
!= GET_MODE (to
))
4494 target
= gen_reg_rtx (imode
);
4496 emit_unop_insn (icode
, target
, from
,
4497 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4499 convert_move (to
, target
, unsignedp
);
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is not needed.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For other inputs, overflow happens and the result is
     undefined.)  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
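  /* Worked example of the comment above (illustrative, DFmode -> unsigned
     DImode): for an input x in [2^63, 2^64-1], the code below computes
     fix (x - 2^63) and then XORs bit 63 back in.  Because x's highest set
     mantissa bit is already at 2^63, subtracting 2^63 is exact (no
     rounding), and the XOR re-adds 2^63 in the integer domain, giving the
     same result a direct unsigned conversion would.  */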
4526 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4527 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4528 fmode
= GET_MODE_WIDER_MODE (fmode
))
4529 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4533 REAL_VALUE_TYPE offset
;
4534 rtx limit
, lab1
, lab2
, insn
;
4536 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4537 real_2expN (&offset
, bitsize
- 1);
4538 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4539 lab1
= gen_label_rtx ();
4540 lab2
= gen_label_rtx ();
4543 from
= force_not_mem (from
);
4545 if (fmode
!= GET_MODE (from
))
4546 from
= convert_to_mode (fmode
, from
, 0);
4548 /* See if we need to do the subtraction. */
4549 do_pending_stack_adjust ();
4550 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4553 /* If not, do the signed "fix" and branch around fixup code. */
4554 expand_fix (to
, from
, 0);
4555 emit_jump_insn (gen_jump (lab2
));
4558 /* Otherwise, subtract 2**(N-1), convert to signed number,
4559 then add 2**(N-1). Do the addition using XOR since this
4560 will often generate better code. */
4562 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4563 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4564 expand_fix (to
, target
, 0);
4565 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4567 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4569 to
, 1, OPTAB_LIB_WIDEN
);
4572 emit_move_insn (to
, target
);
4576 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4577 != CODE_FOR_nothing
)
4579 /* Make a place for a REG_NOTE and add it. */
4580 insn
= emit_move_insn (to
, to
);
4581 set_unique_reg_note (insn
,
4583 gen_rtx_fmt_e (UNSIGNED_FIX
,
4591 /* We can't do it with an insn, so use a library call. But first ensure
4592 that the mode of TO is at least as wide as SImode, since those are the
4593 only library calls we know about. */
4595 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4597 target
= gen_reg_rtx (SImode
);
4599 expand_fix (target
, from
, unsignedp
);
4607 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4608 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4609 gcc_assert (libfunc
);
4612 from
= force_not_mem (from
);
4616 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4617 GET_MODE (to
), 1, from
,
4619 insns
= get_insns ();
4622 emit_libcall_block (insns
, target
, value
,
4623 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4624 GET_MODE (to
), from
));
4629 if (GET_MODE (to
) == GET_MODE (target
))
4630 emit_move_insn (to
, target
);
4632 convert_move (to
, target
, 0);
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}
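/* Usage sketch (illustrative only): expanders elsewhere in the compiler
   guard an open-coded RTL strategy on a query such as

     if (have_insn_for (PLUS, SImode))
       ... emit the open-coded sequence ...

   which is true whenever add_optab has an SImode handler, since PLUS is
   the rtx code recorded for add_optab in code_to_optab.  */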
/* Create a blank optab.  */

static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }

  return op;
}
/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */

static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */

static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.  */

static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}
/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.  */

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  enum machine_mode mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);

      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}
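/* Naming example for the scheme described above (illustrative): calling
   init_libfuncs with add_optab, the operation name "add" and suffix '3'
   gives the DImode entry the name "__adddi3" -- two underscores, the
   operation name, the lowercased mode name, and the operand-count
   suffix.  */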
/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
}
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */

static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class from_class,
                               enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
         tmode = GET_MODE_WIDER_MODE (tmode))
      {
        fname = GET_MODE_NAME (fmode);
        tname = GET_MODE_NAME (tmode);

        p = suffix;
        for (q = fname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = tname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p = '\0';

        tab->handlers[tmode][fmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}
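/* Naming example (illustrative): with OPNAME "float", FROM_CLASS MODE_INT
   and TO_CLASS MODE_FLOAT, the SImode -> DFmode entry is named
   "__floatsidf" -- underscores, the operation name, then the lowercased
   source and destination mode names, with no operand-count suffix.  */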
4839 /* Initialize the libfunc fields of an entire group of entries of an
4840 intra-mode-class conversion optab. The string formation rules are
4841 similar to the ones for init_libfunc, above. WIDENING says whether
4842 the optab goes from narrow to wide modes or vice versa. These functions
4843 have two mode names _and_ an operand count. */
4845 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4846 enum mode_class
class, bool widening
)
4848 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4849 size_t opname_len
= strlen (opname
);
4850 size_t max_mname_len
= 0;
4852 enum machine_mode nmode
, wmode
;
4853 const char *nname
, *wname
;
4855 char *libfunc_name
, *suffix
;
4858 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4859 nmode
= GET_MODE_WIDER_MODE (nmode
))
4860 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4862 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4863 libfunc_name
[0] = '_';
4864 libfunc_name
[1] = '_';
4865 memcpy (&libfunc_name
[2], opname
, opname_len
);
4866 suffix
= libfunc_name
+ opname_len
+ 2;
4868 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4869 nmode
= GET_MODE_WIDER_MODE (nmode
))
4870 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4871 wmode
= GET_MODE_WIDER_MODE (wmode
))
4873 nname
= GET_MODE_NAME (nmode
);
4874 wname
= GET_MODE_NAME (wmode
);
4877 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4879 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4885 tab
->handlers
[widening
? wmode
: nmode
]
4886 [widening
? nmode
: wmode
].libfunc
4887 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except that this is
     a function.  Pretend this is "int foo ()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SYMBOL_REF_DECL (symbol) = 0;

  return symbol;
}
/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */

void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.  */

void
set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
                  enum machine_mode fmode, const char *name)
{
  if (name)
    optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[tmode][fmode].libfunc = 0;
}
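/* Usage sketch (illustrative only): a target's TARGET_INIT_LIBFUNCS hook,
   invoked from init_optabs below via targetm.init_libfuncs, can override
   the default names, e.g.

     set_optab_libfunc (sdiv_optab, SImode, "__my_divsi3");
     set_conv_libfunc (sfloat_optab, DFmode, SImode, NULL);

   where "__my_divsi3" is a made-up routine name (not one provided by
   libgcc) and the NULL removes the default SImode -> DFmode float
   libcall entirely.  */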
4941 /* Call this once to initialize the contents of the optabs
4942 appropriately for the current target machine. */
4949 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4951 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4952 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4954 #ifdef HAVE_conditional_move
4955 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4956 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4959 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4961 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4962 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4965 add_optab
= init_optab (PLUS
);
4966 addv_optab
= init_optabv (PLUS
);
4967 sub_optab
= init_optab (MINUS
);
4968 subv_optab
= init_optabv (MINUS
);
4969 smul_optab
= init_optab (MULT
);
4970 smulv_optab
= init_optabv (MULT
);
4971 smul_highpart_optab
= init_optab (UNKNOWN
);
4972 umul_highpart_optab
= init_optab (UNKNOWN
);
4973 smul_widen_optab
= init_optab (UNKNOWN
);
4974 umul_widen_optab
= init_optab (UNKNOWN
);
4975 sdiv_optab
= init_optab (DIV
);
4976 sdivv_optab
= init_optabv (DIV
);
4977 sdivmod_optab
= init_optab (UNKNOWN
);
4978 udiv_optab
= init_optab (UDIV
);
4979 udivmod_optab
= init_optab (UNKNOWN
);
4980 smod_optab
= init_optab (MOD
);
4981 umod_optab
= init_optab (UMOD
);
4982 fmod_optab
= init_optab (UNKNOWN
);
4983 drem_optab
= init_optab (UNKNOWN
);
4984 ftrunc_optab
= init_optab (UNKNOWN
);
4985 and_optab
= init_optab (AND
);
4986 ior_optab
= init_optab (IOR
);
4987 xor_optab
= init_optab (XOR
);
4988 ashl_optab
= init_optab (ASHIFT
);
4989 ashr_optab
= init_optab (ASHIFTRT
);
4990 lshr_optab
= init_optab (LSHIFTRT
);
4991 rotl_optab
= init_optab (ROTATE
);
4992 rotr_optab
= init_optab (ROTATERT
);
4993 smin_optab
= init_optab (SMIN
);
4994 smax_optab
= init_optab (SMAX
);
4995 umin_optab
= init_optab (UMIN
);
4996 umax_optab
= init_optab (UMAX
);
4997 pow_optab
= init_optab (UNKNOWN
);
4998 atan2_optab
= init_optab (UNKNOWN
);
  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
5002 mov_optab
= init_optab (SET
);
5003 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5004 cmp_optab
= init_optab (COMPARE
);
5006 ucmp_optab
= init_optab (UNKNOWN
);
5007 tst_optab
= init_optab (UNKNOWN
);
5009 eq_optab
= init_optab (EQ
);
5010 ne_optab
= init_optab (NE
);
5011 gt_optab
= init_optab (GT
);
5012 ge_optab
= init_optab (GE
);
5013 lt_optab
= init_optab (LT
);
5014 le_optab
= init_optab (LE
);
5015 unord_optab
= init_optab (UNORDERED
);
5017 neg_optab
= init_optab (NEG
);
5018 negv_optab
= init_optabv (NEG
);
5019 abs_optab
= init_optab (ABS
);
5020 absv_optab
= init_optabv (ABS
);
5021 addcc_optab
= init_optab (UNKNOWN
);
5022 one_cmpl_optab
= init_optab (NOT
);
5023 ffs_optab
= init_optab (FFS
);
5024 clz_optab
= init_optab (CLZ
);
5025 ctz_optab
= init_optab (CTZ
);
5026 popcount_optab
= init_optab (POPCOUNT
);
5027 parity_optab
= init_optab (PARITY
);
5028 sqrt_optab
= init_optab (SQRT
);
5029 floor_optab
= init_optab (UNKNOWN
);
5030 lfloor_optab
= init_optab (UNKNOWN
);
5031 ceil_optab
= init_optab (UNKNOWN
);
5032 lceil_optab
= init_optab (UNKNOWN
);
5033 round_optab
= init_optab (UNKNOWN
);
5034 btrunc_optab
= init_optab (UNKNOWN
);
5035 nearbyint_optab
= init_optab (UNKNOWN
);
5036 rint_optab
= init_optab (UNKNOWN
);
5037 lrint_optab
= init_optab (UNKNOWN
);
5038 sincos_optab
= init_optab (UNKNOWN
);
5039 sin_optab
= init_optab (UNKNOWN
);
5040 asin_optab
= init_optab (UNKNOWN
);
5041 cos_optab
= init_optab (UNKNOWN
);
5042 acos_optab
= init_optab (UNKNOWN
);
5043 exp_optab
= init_optab (UNKNOWN
);
5044 exp10_optab
= init_optab (UNKNOWN
);
5045 exp2_optab
= init_optab (UNKNOWN
);
5046 expm1_optab
= init_optab (UNKNOWN
);
5047 ldexp_optab
= init_optab (UNKNOWN
);
5048 logb_optab
= init_optab (UNKNOWN
);
5049 ilogb_optab
= init_optab (UNKNOWN
);
5050 log_optab
= init_optab (UNKNOWN
);
5051 log10_optab
= init_optab (UNKNOWN
);
5052 log2_optab
= init_optab (UNKNOWN
);
5053 log1p_optab
= init_optab (UNKNOWN
);
5054 tan_optab
= init_optab (UNKNOWN
);
5055 atan_optab
= init_optab (UNKNOWN
);
5056 copysign_optab
= init_optab (UNKNOWN
);
5058 strlen_optab
= init_optab (UNKNOWN
);
5059 cbranch_optab
= init_optab (UNKNOWN
);
5060 cmov_optab
= init_optab (UNKNOWN
);
5061 cstore_optab
= init_optab (UNKNOWN
);
5062 push_optab
= init_optab (UNKNOWN
);
5064 vec_extract_optab
= init_optab (UNKNOWN
);
5065 vec_set_optab
= init_optab (UNKNOWN
);
5066 vec_init_optab
= init_optab (UNKNOWN
);
5067 vec_realign_load_optab
= init_optab (UNKNOWN
);
5068 movmisalign_optab
= init_optab (UNKNOWN
);
5070 powi_optab
= init_optab (UNKNOWN
);
5073 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5074 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5075 trunc_optab
= init_convert_optab (TRUNCATE
);
5076 sfix_optab
= init_convert_optab (FIX
);
5077 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5078 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5079 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5080 sfloat_optab
= init_convert_optab (FLOAT
);
5081 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5083 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5085 movmem_optab
[i
] = CODE_FOR_nothing
;
5086 clrmem_optab
[i
] = CODE_FOR_nothing
;
5087 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5088 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5090 sync_add_optab
[i
] = CODE_FOR_nothing
;
5091 sync_sub_optab
[i
] = CODE_FOR_nothing
;
5092 sync_ior_optab
[i
] = CODE_FOR_nothing
;
5093 sync_and_optab
[i
] = CODE_FOR_nothing
;
5094 sync_xor_optab
[i
] = CODE_FOR_nothing
;
5095 sync_nand_optab
[i
] = CODE_FOR_nothing
;
5096 sync_old_add_optab
[i
] = CODE_FOR_nothing
;
5097 sync_old_sub_optab
[i
] = CODE_FOR_nothing
;
5098 sync_old_ior_optab
[i
] = CODE_FOR_nothing
;
5099 sync_old_and_optab
[i
] = CODE_FOR_nothing
;
5100 sync_old_xor_optab
[i
] = CODE_FOR_nothing
;
5101 sync_old_nand_optab
[i
] = CODE_FOR_nothing
;
5102 sync_new_add_optab
[i
] = CODE_FOR_nothing
;
5103 sync_new_sub_optab
[i
] = CODE_FOR_nothing
;
5104 sync_new_ior_optab
[i
] = CODE_FOR_nothing
;
5105 sync_new_and_optab
[i
] = CODE_FOR_nothing
;
5106 sync_new_xor_optab
[i
] = CODE_FOR_nothing
;
5107 sync_new_nand_optab
[i
] = CODE_FOR_nothing
;
5108 sync_compare_and_swap
[i
] = CODE_FOR_nothing
;
5109 sync_compare_and_swap_cc
[i
] = CODE_FOR_nothing
;
5110 sync_lock_test_and_set
[i
] = CODE_FOR_nothing
;
5111 sync_lock_release
[i
] = CODE_FOR_nothing
;
5113 #ifdef HAVE_SECONDARY_RELOADS
5114 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5118 /* Fill in the optabs with the insns we support. */
5121 /* Initialize the optabs with the names of the library functions. */
5122 init_integral_libfuncs (add_optab
, "add", '3');
5123 init_floating_libfuncs (add_optab
, "add", '3');
5124 init_integral_libfuncs (addv_optab
, "addv", '3');
5125 init_floating_libfuncs (addv_optab
, "add", '3');
5126 init_integral_libfuncs (sub_optab
, "sub", '3');
5127 init_floating_libfuncs (sub_optab
, "sub", '3');
5128 init_integral_libfuncs (subv_optab
, "subv", '3');
5129 init_floating_libfuncs (subv_optab
, "sub", '3');
5130 init_integral_libfuncs (smul_optab
, "mul", '3');
5131 init_floating_libfuncs (smul_optab
, "mul", '3');
5132 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5133 init_floating_libfuncs (smulv_optab
, "mul", '3');
5134 init_integral_libfuncs (sdiv_optab
, "div", '3');
5135 init_floating_libfuncs (sdiv_optab
, "div", '3');
5136 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5137 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5138 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5139 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5140 init_integral_libfuncs (smod_optab
, "mod", '3');
5141 init_integral_libfuncs (umod_optab
, "umod", '3');
5142 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5143 init_integral_libfuncs (and_optab
, "and", '3');
5144 init_integral_libfuncs (ior_optab
, "ior", '3');
5145 init_integral_libfuncs (xor_optab
, "xor", '3');
5146 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5147 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5148 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5149 init_integral_libfuncs (smin_optab
, "min", '3');
5150 init_floating_libfuncs (smin_optab
, "min", '3');
5151 init_integral_libfuncs (smax_optab
, "max", '3');
5152 init_floating_libfuncs (smax_optab
, "max", '3');
5153 init_integral_libfuncs (umin_optab
, "umin", '3');
5154 init_integral_libfuncs (umax_optab
, "umax", '3');
5155 init_integral_libfuncs (neg_optab
, "neg", '2');
5156 init_floating_libfuncs (neg_optab
, "neg", '2');
5157 init_integral_libfuncs (negv_optab
, "negv", '2');
5158 init_floating_libfuncs (negv_optab
, "neg", '2');
5159 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5160 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5161 init_integral_libfuncs (clz_optab
, "clz", '2');
5162 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5163 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5164 init_integral_libfuncs (parity_optab
, "parity", '2');
  /* Comparison libcalls for integers MUST come in pairs,
     signed and unsigned.  */
5168 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5169 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5170 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5172 /* EQ etc are floating point only. */
5173 init_floating_libfuncs (eq_optab
, "eq", '2');
5174 init_floating_libfuncs (ne_optab
, "ne", '2');
5175 init_floating_libfuncs (gt_optab
, "gt", '2');
5176 init_floating_libfuncs (ge_optab
, "ge", '2');
5177 init_floating_libfuncs (lt_optab
, "lt", '2');
5178 init_floating_libfuncs (le_optab
, "le", '2');
5179 init_floating_libfuncs (unord_optab
, "unord", '2');
5181 init_floating_libfuncs (powi_optab
, "powi", '2');
5184 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5185 MODE_INT
, MODE_FLOAT
);
5186 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5187 MODE_FLOAT
, MODE_INT
);
5188 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5189 MODE_FLOAT
, MODE_INT
);
5191 /* sext_optab is also used for FLOAT_EXTEND. */
5192 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5193 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5195 /* Use cabs for double complex abs, since systems generally have cabs.
5196 Don't define any libcall for float complex, so that cabs will be used. */
5197 if (complex_double_type_node
)
5198 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5199 = init_one_libfunc ("cabs");
5201 /* The ffs function operates on `int'. */
5202 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5203 = init_one_libfunc ("ffs");
5205 abort_libfunc
= init_one_libfunc ("abort");
5206 memcpy_libfunc
= init_one_libfunc ("memcpy");
5207 memmove_libfunc
= init_one_libfunc ("memmove");
5208 memcmp_libfunc
= init_one_libfunc ("memcmp");
5209 memset_libfunc
= init_one_libfunc ("memset");
5210 setbits_libfunc
= init_one_libfunc ("__setbits");
5212 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5213 ? "_Unwind_SjLj_Resume"
5214 : "_Unwind_Resume");
5215 #ifndef DONT_USE_BUILTIN_SETJMP
5216 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5217 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5219 setjmp_libfunc
= init_one_libfunc ("setjmp");
5220 longjmp_libfunc
= init_one_libfunc ("longjmp");
5222 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5223 unwind_sjlj_unregister_libfunc
5224 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5226 /* For function entry/exit instrumentation. */
5227 profile_function_entry_libfunc
5228 = init_one_libfunc ("__cyg_profile_func_enter");
5229 profile_function_exit_libfunc
5230 = init_one_libfunc ("__cyg_profile_func_exit");
5232 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5234 if (HAVE_conditional_trap
)
5235 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5237 /* Allow the target to add more libcalls or rename some, etc. */
5238 targetm
.init_libfuncs ();
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i;
  int j;
  int k;

  /* Dump the arithmetic optabs.  */
  for (i = 0; i != (int) OTI_MAX; i++)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        optab o;
        struct optab_handlers *h;

        o = optab_table[i];
        h = &o->handlers[j];
        if (h->libfunc)
          {
            gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (o->code),
                     GET_MODE_NAME (j),
                     XSTR (h->libfunc, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = 0; i < (int) CTI_MAX; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          convert_optab o;
          struct optab_handlers *h;

          o = convert_optab_table[i];
          h = &o->handlers[j][k];
          if (h->libfunc)
            {
              gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (o->code),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (h->libfunc, 0));
            }
        }
}
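/* Intended use (illustrative): this is a debugging aid rather than part of
   code generation; from a debugger attached to the compiler one can issue
   "call debug_optab_libfuncs ()" after init_optabs has run to see which
   libcall name each optab entry resolved to.  */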
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
               rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;

  if (!HAVE_conditional_trap)
    return 0;

  if (mode == VOIDmode)
    return 0;

  icode = cmp_optab->handlers[(int) mode].insn_code;
  if (icode == CODE_FOR_nothing)
    return 0;

  start_sequence ();
  op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
  op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
  if (!op1 || !op2)
    {
      end_sequence ();
      return 0;
    }
  emit_insn (GEN_FCN (icode) (op1, op2));

  PUT_CODE (trap_rtx, code);
  gcc_assert (HAVE_conditional_trap);
  insn = gen_conditional_trap (trap_rtx, tcode);
  if (insn)
    {
      emit_insn (insn);
      insn = get_insns ();
    }
  end_sequence ();

  return insn;
}
5341 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5342 or unsigned operation code. */
5344 static enum rtx_code
5345 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5357 code
= unsignedp
? LTU
: LT
;
5360 code
= unsignedp
? LEU
: LE
;
5363 code
= unsignedp
? GTU
: GT
;
5366 code
= unsignedp
? GEU
: GE
;
5369 case UNORDERED_EXPR
:
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate a compare instruction.  */

static rtx
vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
{
  enum rtx_code rcode;
  tree t_op0, t_op1;
  rtx rtx_op0, rtx_op1;

  /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
     ensures that the condition is a relational operation.  */
  gcc_assert (COMPARISON_CLASS_P (cond));

  rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
  t_op0 = TREE_OPERAND (cond, 0);
  t_op1 = TREE_OPERAND (cond, 1);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);

  if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
      && GET_MODE (rtx_op0) != VOIDmode)
    rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);

  if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
      && GET_MODE (rtx_op1) != VOIDmode)
    rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
/* Return insn code for VEC_COND_EXPR EXPR.  */

static inline enum insn_code
get_vcond_icode (tree expr, enum machine_mode mode)
{
  enum insn_code icode = CODE_FOR_nothing;

  if (TYPE_UNSIGNED (TREE_TYPE (expr)))
    icode = vcondu_gen_code[mode];
  else
    icode = vcond_gen_code[mode];
  return icode;
}

/* Return TRUE iff appropriate vector insns are available
   for the vector cond expr EXPR in mode VMODE.  */

bool
expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
{
  if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
    return false;
  return true;
}
/* Generate insns for VEC_COND_EXPR.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
                                   unsignedp, icode);
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them into regs, if required.  */
  rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
                         NULL_RTX, VOIDmode, 1);
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
                         NULL_RTX, VOIDmode, 1);
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit the instruction.  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
                              comparison, cc_op0, cc_op1));

  return target;
}
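/* Operand layout assumed by the expander above (descriptive): a vcond or
   vcondu pattern takes operand 0 = destination vector, operands 1 and 2 =
   the two value vectors, operand 3 = the comparison rtx, and operands 4
   and 5 = the rtx operands being compared, which is why vector_compare_rtx
   checks the predicates of operands 4 and 5.  */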
5501 /* This is an internal subroutine of the other compare_and_swap expanders.
5502 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5503 operation. TARGET is an optional place to store the value result of
5504 the operation. ICODE is the particular instruction to expand. Return
5505 the result of the operation. */
5508 expand_val_compare_and_swap_1 (rtx mem
, rtx old_val
, rtx new_val
,
5509 rtx target
, enum insn_code icode
)
5511 enum machine_mode mode
= GET_MODE (mem
);
5514 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5515 target
= gen_reg_rtx (mode
);
5517 if (GET_MODE (old_val
) != VOIDmode
&& GET_MODE (old_val
) != mode
)
5518 old_val
= convert_modes (mode
, GET_MODE (old_val
), old_val
, 1);
5519 if (!insn_data
[icode
].operand
[2].predicate (old_val
, mode
))
5520 old_val
= force_reg (mode
, old_val
);
5522 if (GET_MODE (new_val
) != VOIDmode
&& GET_MODE (new_val
) != mode
)
5523 new_val
= convert_modes (mode
, GET_MODE (new_val
), new_val
, 1);
5524 if (!insn_data
[icode
].operand
[3].predicate (new_val
, mode
))
5525 new_val
= force_reg (mode
, new_val
);
5527 insn
= GEN_FCN (icode
) (target
, mem
, old_val
, new_val
);
5528 if (insn
== NULL_RTX
)
5535 /* Expand a compare-and-swap operation and return its value. */
5538 expand_val_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5540 enum machine_mode mode
= GET_MODE (mem
);
5541 enum insn_code icode
= sync_compare_and_swap
[mode
];
5543 if (icode
== CODE_FOR_nothing
)
5546 return expand_val_compare_and_swap_1 (mem
, old_val
, new_val
, target
, icode
);
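/* Usage sketch (illustrative only): a builtin expander wanting the old
   value of *MEM from an atomic compare-and-swap would call

     rtx old = expand_val_compare_and_swap (mem, expected, desired, target);

   where MEM is a MEM rtx and EXPECTED/DESIRED are values already in (or
   convertible to) MEM's mode; a NULL_RTX result means the target has no
   usable sync_compare_and_swap pattern for that mode and the caller must
   fall back to a library call or a lock.  */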
5549 /* Expand a compare-and-swap operation and store true into the result if
5550 the operation was successful and false otherwise. Return the result.
5551 Unlike other routines, TARGET is not optional. */
5554 expand_bool_compare_and_swap (rtx mem
, rtx old_val
, rtx new_val
, rtx target
)
5556 enum machine_mode mode
= GET_MODE (mem
);
5557 enum insn_code icode
;
5558 rtx subtarget
, label0
, label1
;
5560 /* If the target supports a compare-and-swap pattern that simultaneously
5561 sets some flag for success, then use it. Otherwise use the regular
5562 compare-and-swap and follow that immediately with a compare insn. */
5563 icode
= sync_compare_and_swap_cc
[mode
];
5567 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5569 if (subtarget
!= NULL_RTX
)
5573 case CODE_FOR_nothing
:
5574 icode
= sync_compare_and_swap
[mode
];
5575 if (icode
== CODE_FOR_nothing
)
5578 /* Ensure that if old_val == mem, that we're not comparing
5579 against an old value. */
5580 if (MEM_P (old_val
))
5581 old_val
= force_reg (mode
, old_val
);
5583 subtarget
= expand_val_compare_and_swap_1 (mem
, old_val
, new_val
,
5585 if (subtarget
== NULL_RTX
)
5588 emit_cmp_insn (subtarget
, old_val
, EQ
, const0_rtx
, mode
, true);
5591 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5592 setcc instruction from the beginning. We don't work too hard here,
5593 but it's nice to not be stupid about initial code gen either. */
5594 if (STORE_FLAG_VALUE
== 1)
5596 icode
= setcc_gen_code
[EQ
];
5597 if (icode
!= CODE_FOR_nothing
)
5599 enum machine_mode cmode
= insn_data
[icode
].operand
[0].mode
;
5603 if (!insn_data
[icode
].operand
[0].predicate (target
, cmode
))
5604 subtarget
= gen_reg_rtx (cmode
);
5606 insn
= GEN_FCN (icode
) (subtarget
);
5610 if (GET_MODE (target
) != GET_MODE (subtarget
))
5612 convert_move (target
, subtarget
, 1);
5620 /* Without an appropriate setcc instruction, use a set of branches to
5621 get 1 and 0 stored into target. Presumably if the target has a
5622 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5624 label0
= gen_label_rtx ();
5625 label1
= gen_label_rtx ();
5627 emit_jump_insn (bcc_gen_fctn
[EQ
] (label0
));
5628 emit_move_insn (target
, const0_rtx
);
5629 emit_jump_insn (gen_jump (label1
));
5630 emit_label (label0
);
5631 emit_move_insn (target
, const1_rtx
);
5632 emit_label (label1
);
5637 /* This is a helper function for the other atomic operations. This function
5638 emits a loop that contains SEQ that iterates until a compare-and-swap
5639 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5640 a set of instructions that takes a value from OLD_REG as an input and
5641 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5642 set to the current contents of MEM. After SEQ, a compare-and-swap will
5643 attempt to update MEM with NEW_REG. The function returns true when the
5644 loop was generated successfully. */
5647 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5649 enum machine_mode mode
= GET_MODE (mem
);
5650 enum insn_code icode
;
5651 rtx label
, cmp_reg
, subtarget
;
  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        cmp_reg = compare-and-swap(mem, old_reg, new_reg)
        if (cmp_reg != old_reg)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */
5666 label
= gen_label_rtx ();
5667 cmp_reg
= gen_reg_rtx (mode
);
5669 emit_move_insn (cmp_reg
, mem
);
5671 emit_move_insn (old_reg
, cmp_reg
);
5675 /* If the target supports a compare-and-swap pattern that simultaneously
5676 sets some flag for success, then use it. Otherwise use the regular
5677 compare-and-swap and follow that immediately with a compare insn. */
5678 icode
= sync_compare_and_swap_cc
[mode
];
5682 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5684 if (subtarget
!= NULL_RTX
)
5686 gcc_assert (subtarget
== cmp_reg
);
5691 case CODE_FOR_nothing
:
5692 icode
= sync_compare_and_swap
[mode
];
5693 if (icode
== CODE_FOR_nothing
)
5696 subtarget
= expand_val_compare_and_swap_1 (mem
, old_reg
, new_reg
,
5698 if (subtarget
== NULL_RTX
)
5700 if (subtarget
!= cmp_reg
)
5701 emit_move_insn (cmp_reg
, subtarget
);
5703 emit_cmp_insn (cmp_reg
, old_reg
, EQ
, const0_rtx
, mode
, true);
5706 /* ??? Mark this jump predicted not taken? */
5707 emit_jump_insn (bcc_gen_fctn
[NE
] (label
));
5712 /* This function generates the atomic operation MEM CODE= VAL. In this
5713 case, we do not care about any resulting value. Returns NULL if we
5714 cannot generate the operation. */
5717 expand_sync_operation (rtx mem
, rtx val
, enum rtx_code code
)
5719 enum machine_mode mode
= GET_MODE (mem
);
5720 enum insn_code icode
;
5723 /* Look to see if the target supports the operation directly. */
5727 icode
= sync_add_optab
[mode
];
5730 icode
= sync_ior_optab
[mode
];
5733 icode
= sync_xor_optab
[mode
];
5736 icode
= sync_and_optab
[mode
];
5739 icode
= sync_nand_optab
[mode
];
5743 icode
= sync_sub_optab
[mode
];
5744 if (icode
== CODE_FOR_nothing
)
5746 icode
= sync_add_optab
[mode
];
5747 if (icode
!= CODE_FOR_nothing
)
5749 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
5759 /* Generate the direct operation, if present. */
5760 if (icode
!= CODE_FOR_nothing
)
5762 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5763 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5764 if (!insn_data
[icode
].operand
[1].predicate (val
, mode
))
5765 val
= force_reg (mode
, val
);
5767 insn
= GEN_FCN (icode
) (mem
, val
);
5775 /* Failing that, generate a compare-and-swap loop in which we perform the
5776 operation with normal arithmetic instructions. */
5777 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5779 rtx t0
= gen_reg_rtx (mode
), t1
;
5786 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5789 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5790 true, OPTAB_LIB_WIDEN
);
5792 insn
= get_insns ();
5795 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
5802 /* This function generates the atomic operation MEM CODE= VAL. In this
5803 case, we do care about the resulting value: if AFTER is true then
5804 return the value MEM holds after the operation, if AFTER is false
5805 then return the value MEM holds before the operation. TARGET is an
5806 optional place for the result value to be stored. */
5809 expand_sync_fetch_operation (rtx mem
, rtx val
, enum rtx_code code
,
5810 bool after
, rtx target
)
5812 enum machine_mode mode
= GET_MODE (mem
);
5813 enum insn_code old_code
, new_code
, icode
;
5817 /* Look to see if the target supports the operation directly. */
5821 old_code
= sync_old_add_optab
[mode
];
5822 new_code
= sync_new_add_optab
[mode
];
5825 old_code
= sync_old_ior_optab
[mode
];
5826 new_code
= sync_new_ior_optab
[mode
];
5829 old_code
= sync_old_xor_optab
[mode
];
5830 new_code
= sync_new_xor_optab
[mode
];
5833 old_code
= sync_old_and_optab
[mode
];
5834 new_code
= sync_new_and_optab
[mode
];
5837 old_code
= sync_old_nand_optab
[mode
];
5838 new_code
= sync_new_nand_optab
[mode
];
5842 old_code
= sync_old_sub_optab
[mode
];
5843 new_code
= sync_new_sub_optab
[mode
];
5844 if (old_code
== CODE_FOR_nothing
&& new_code
== CODE_FOR_nothing
)
5846 old_code
= sync_old_add_optab
[mode
];
5847 new_code
= sync_new_add_optab
[mode
];
5848 if (old_code
!= CODE_FOR_nothing
|| new_code
!= CODE_FOR_nothing
)
5850 val
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, 1);
  /* If the target supports the proper new/old operation, great.  But if
     we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, we
     can always perform the operation again with normal arithmetic.  In
     the case in which only the new value is supported, we can only handle
     this if the operation is reversible.  */
5870 if (icode
== CODE_FOR_nothing
)
5873 if (icode
!= CODE_FOR_nothing
)
5880 if (icode
== CODE_FOR_nothing
5881 && (code
== PLUS
|| code
== MINUS
|| code
== XOR
))
5884 if (icode
!= CODE_FOR_nothing
)
5889 /* If we found something supported, great. */
5890 if (icode
!= CODE_FOR_nothing
)
5892 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5893 target
= gen_reg_rtx (mode
);
5895 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5896 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5897 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5898 val
= force_reg (mode
, val
);
5900 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5905 /* If we need to compensate for using an operation with the
5906 wrong return value, do so now. */
5913 else if (code
== MINUS
)
5918 target
= expand_simple_unop (mode
, NOT
, target
, NULL_RTX
, true);
5919 target
= expand_simple_binop (mode
, code
, target
, val
, NULL_RTX
,
5920 true, OPTAB_LIB_WIDEN
);
5927 /* Failing that, generate a compare-and-swap loop in which we perform the
5928 operation with normal arithmetic instructions. */
5929 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5931 rtx t0
= gen_reg_rtx (mode
), t1
;
5933 if (!target
|| !register_operand (target
, mode
))
5934 target
= gen_reg_rtx (mode
);
5939 emit_move_insn (target
, t0
);
5943 t1
= expand_simple_unop (mode
, NOT
, t1
, NULL_RTX
, true);
5946 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
,
5947 true, OPTAB_LIB_WIDEN
);
5949 emit_move_insn (target
, t1
);
5951 insn
= get_insns ();
5954 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may not support this operation and only support VAL with the constant 1;
   in this case the return value will still be 0/1, but the exact value
   stored in MEM is target defined.  TARGET is an optional place to stick
   the return value.  */
5969 expand_sync_lock_test_and_set (rtx mem
, rtx val
, rtx target
)
5971 enum machine_mode mode
= GET_MODE (mem
);
5972 enum insn_code icode
;
5975 /* If the target supports the test-and-set directly, great. */
5976 icode
= sync_lock_test_and_set
[mode
];
5977 if (icode
!= CODE_FOR_nothing
)
5979 if (!target
|| !insn_data
[icode
].operand
[0].predicate (target
, mode
))
5980 target
= gen_reg_rtx (mode
);
5982 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
5983 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
5984 if (!insn_data
[icode
].operand
[2].predicate (val
, mode
))
5985 val
= force_reg (mode
, val
);
5987 insn
= GEN_FCN (icode
) (target
, mem
, val
);
5995 /* Otherwise, use a compare-and-swap loop for the exchange. */
5996 if (sync_compare_and_swap
[mode
] != CODE_FOR_nothing
)
5998 if (!target
|| !register_operand (target
, mode
))
5999 target
= gen_reg_rtx (mode
);
6000 if (GET_MODE (val
) != VOIDmode
&& GET_MODE (val
) != mode
)
6001 val
= convert_modes (mode
, GET_MODE (val
), val
, 1);
6002 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6009 #include "gt-optabs.h"